/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	struct mlx5_modify_hdr	*modify_hdr;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
};
#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *            container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *            container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
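/* Illustrative sketch (not part of the original driver): recovering the
 * flow from one of the encaps[] list_head items takes exactly the two
 * container_of() steps described above, as mlx5e_take_all_encap_flows()
 * does later in this file:
 *
 *	efi = container_of(list_head_item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */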
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	unsigned long		flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow	*peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	int			tmp_efi_index;
	struct list_head	tmp_list; /* temporary flow list used by neigh update */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
	struct completion	init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */

	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct tunnel_match_key {
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_keyid enc_key_id;
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_ip enc_ip;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};

	int filter_ifindex;
};
/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
 * Upper TUNNEL_INFO_BITS for general tunnel info.
 * Lower ENC_OPTS_BITS bits for enc_opts.
 */
#define TUNNEL_INFO_BITS 6
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
#define ENC_OPTS_BITS 2
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
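/* Illustrative sketch (not part of the original driver): packing and
 * unpacking a tunnel id follows directly from the layout above,
 *
 *	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
 *	tun_id = value >> ENC_OPTS_BITS;
 *	enc_opts_id = value & ENC_OPTS_BITS_MASK;
 *
 * see mlx5e_get_flow_tunnel_id() and mlx5e_put_flow_tunnel_id() below.
 */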
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 3,
		.mlen = 1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
	data = cpu_to_be32(data) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
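/* Illustrative usage (not part of the original driver): callers typically
 * pair the two helpers, matching on a register value in the rule spec
 * while a modify-header action writes it, e.g.
 *
 *	mlx5e_tc_match_to_reg_match(spec, TUNNEL_TO_REG, value, mask);
 *	err = mlx5e_tc_match_to_reg_set(mdev, mod_hdr_acts,
 *					TUNNEL_TO_REG, value);
 *
 * mlx5e_get_flow_tunnel_id() below does exactly this, depending on chain.
 */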
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the  hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();

	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
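/* Illustrative pairing (not part of the original driver): the barriers
 * above let a writer publish a fully initialized rule before raising a
 * flag, and a reader dereference it only after observing the flag, e.g.
 *
 *	writer:	flow->rule[0] = rule;
 *		flow_flag_set(flow, OFFLOADED);
 *
 *	reader:	if (flow_flag_test(flow, OFFLOADED))
 *			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
 */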
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
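/* Illustrative flow (not part of the original driver): mlx5e_attach_mod_hdr()
 * below dedupes modify-header contexts using these two helpers,
 *
 *	key.actions = parse_attr->mod_hdr_acts.actions;
 *	key.num_actions = parse_attr->mod_hdr_acts.num_actions;
 *	hash_key = hash_mod_hdr_info(&key);
 *	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
 *
 * so flows with identical rewrite action lists share one firmware object.
 */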
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
	struct mlx5e_mod_hdr_entry *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

	kfree(mh);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions = parse_attr->mod_hdr_acts.num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_acts.actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = mh->modify_hdr;
	else
		flow->nic_attr->modify_hdr = mh->modify_hdr;

	return 0;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}
static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
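	/* Worked example (not part of the original driver): on a 100 Gbps
	 * port, link_speed64 = 100000 / 50000 = 2, so the hairpin gets two
	 * channel pairs and RSS; at 50 Gbps or less it gets a single one.
	 */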
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags  = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
		return;
	}

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr slow_attr;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
	flow_flag_clear(flow, SLOW);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_esw_chains_get_chain_range(esw);
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_esw_chains_get_prio_range(esw);
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	int out_index;

	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}
/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->tun_info);
	kfree(e->encap_header);
	kfree(e);
}

void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != U16_MAX ||
			    opt->type != U8_MAX ||
			    memchr_inv(opt->opt_data, 0xFF,
				       opt->length * 4)) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  enc_opts_match.key, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
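	/* Worked example (not part of the original driver): tun_id 5 with
	 * enc_opts_id 2 packs to value = 5 << 2 | 2 = 0x16; when no
	 * enc_opts mapping was allocated, the low ENC_OPTS_BITS are masked
	 * out so they are ignored on lookup.
	 */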
	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->esw_attr->chain;
	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->esw_attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -EINVAL;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EINVAL;
	}

	return 0;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
2176 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2177 is_vlan_dev(filter_dev)) {
2178 struct flow_dissector_key_vlan filter_dev_mask;
2179 struct flow_dissector_key_vlan filter_dev_key;
2180 struct flow_match_vlan match;
2182 if (is_vlan_dev(filter_dev)) {
2183 match.key = &filter_dev_key;
2184 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2185 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2186 match.key->vlan_priority = 0;
2187 match.mask = &filter_dev_mask;
2188 memset(match.mask, 0xff, sizeof(*match.mask));
2189 match.mask->vlan_priority = 0;
2190 } else {
2191 flow_rule_match_vlan(rule, &match);
2192 }
2193 if (match.mask->vlan_id ||
2194 match.mask->vlan_priority ||
2195 match.mask->vlan_tpid) {
2196 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2197 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2198 svlan_tag, 1);
2199 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2200 svlan_tag, 1);
2201 } else {
2202 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2203 cvlan_tag, 1);
2204 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2205 cvlan_tag, 1);
2206 }
2208 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2209 match.mask->vlan_id);
2210 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2211 match.key->vlan_id);
2213 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2214 match.mask->vlan_priority);
2215 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2216 match.key->vlan_priority);
2218 *match_level = MLX5_MATCH_L2;
2219 }
2220 } else if (*match_level != MLX5_MATCH_NONE) {
2221 /* cvlan_tag enabled in match criteria and
2222 * disabled in match value means both S & C tags
2223 * don't exist (untagged on both levels)
2224 */
2225 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2226 *match_level = MLX5_MATCH_L2;
2227 }
2229 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2230 struct flow_match_vlan match;
2232 flow_rule_match_cvlan(rule, &match);
2233 if (match.mask->vlan_id ||
2234 match.mask->vlan_priority ||
2235 match.mask->vlan_tpid) {
2236 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2237 MLX5_SET(fte_match_set_misc, misc_c,
2238 outer_second_svlan_tag, 1);
2239 MLX5_SET(fte_match_set_misc, misc_v,
2240 outer_second_svlan_tag, 1);
2241 } else {
2242 MLX5_SET(fte_match_set_misc, misc_c,
2243 outer_second_cvlan_tag, 1);
2244 MLX5_SET(fte_match_set_misc, misc_v,
2245 outer_second_cvlan_tag, 1);
2248 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2249 match.mask->vlan_id);
2250 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2251 match.key->vlan_id);
2252 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2253 match.mask->vlan_priority);
2254 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2255 match.key->vlan_priority);
2257 *match_level = MLX5_MATCH_L2;
2258 }
2259 }
2261 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2262 struct flow_match_eth_addrs match;
2264 flow_rule_match_eth_addrs(rule, &match);
2265 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2266 dmac_47_16),
2267 match.mask->dst);
2268 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2269 dmac_47_16),
2270 match.key->dst);
2272 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2273 smac_47_16),
2274 match.mask->src);
2275 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2276 smac_47_16),
2277 match.key->src);
2279 if (!is_zero_ether_addr(match.mask->src) ||
2280 !is_zero_ether_addr(match.mask->dst))
2281 *match_level = MLX5_MATCH_L2;
2284 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2285 struct flow_match_control match;
2287 flow_rule_match_control(rule, &match);
2288 addr_type = match.key->addr_type;
2290 /* the HW doesn't support frag first/later */
2291 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2292 return -EOPNOTSUPP;
2294 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2295 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2296 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2297 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2299 /* the HW doesn't need L3 inline to match on frag=no */
2300 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2301 *match_level = MLX5_MATCH_L2;
2302 /* *** L2 attributes parsing up to here *** */
2303 else
2304 *match_level = MLX5_MATCH_L3;
2305 }
2306 }
2308 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2309 struct flow_match_basic match;
2311 flow_rule_match_basic(rule, &match);
2312 ip_proto = match.key->ip_proto;
2314 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2315 match.mask->ip_proto);
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2317 match.key->ip_proto);
2319 if (match.mask->ip_proto)
2320 *match_level = MLX5_MATCH_L3;
2323 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2324 struct flow_match_ipv4_addrs match;
2326 flow_rule_match_ipv4_addrs(rule, &match);
2327 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2328 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2329 &match.mask->src, sizeof(match.mask->src));
2330 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2331 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2332 &match.key->src, sizeof(match.key->src));
2333 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2334 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2335 &match.mask->dst, sizeof(match.mask->dst));
2336 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2337 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2338 &match.key->dst, sizeof(match.key->dst));
2340 if (match.mask->src || match.mask->dst)
2341 *match_level = MLX5_MATCH_L3;
2344 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2345 struct flow_match_ipv6_addrs match;
2347 flow_rule_match_ipv6_addrs(rule, &match);
2348 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2349 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2350 &match.mask->src, sizeof(match.mask->src));
2351 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2352 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2353 &match.key->src, sizeof(match.key->src));
2355 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2356 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2357 &match.mask->dst, sizeof(match.mask->dst));
2358 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2359 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2360 &match.key->dst, sizeof(match.key->dst));
2362 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2363 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2364 *match_level = MLX5_MATCH_L3;
2367 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2368 struct flow_match_ip match;
2370 flow_rule_match_ip(rule, &match);
2371 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2372 match.mask->tos & 0x3);
2373 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2374 match.key->tos & 0x3);
2376 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2377 match.mask->tos >> 2);
2378 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2379 match.key->tos >> 2);
2381 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2382 match.mask->ttl);
2383 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2384 match.key->ttl);
2386 if (match.mask->ttl &&
2387 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2388 ft_field_support.outer_ipv4_ttl)) {
2389 NL_SET_ERR_MSG_MOD(extack,
2390 "Matching on TTL is not supported");
2391 return -EOPNOTSUPP;
2392 }
2394 if (match.mask->tos || match.mask->ttl)
2395 *match_level = MLX5_MATCH_L3;
2396 }
2398 /* *** L3 attributes parsing up to here *** */
2400 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2401 struct flow_match_ports match;
2403 flow_rule_match_ports(rule, &match);
2404 switch (ip_proto) {
2405 case IPPROTO_TCP:
2406 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2407 tcp_sport, ntohs(match.mask->src));
2408 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2409 tcp_sport, ntohs(match.key->src));
2411 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2412 tcp_dport, ntohs(match.mask->dst));
2413 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2414 tcp_dport, ntohs(match.key->dst));
2415 break;
2417 case IPPROTO_UDP:
2418 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2419 udp_sport, ntohs(match.mask->src));
2420 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2421 udp_sport, ntohs(match.key->src));
2423 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2424 udp_dport, ntohs(match.mask->dst));
2425 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2426 udp_dport, ntohs(match.key->dst));
2427 break;
2428 default:
2429 NL_SET_ERR_MSG_MOD(extack,
2430 "Only UDP and TCP transports are supported for L4 matching");
2431 netdev_err(priv->netdev,
2432 "Only UDP and TCP transports are supported\n");
2433 return -EINVAL;
2434 }
2436 if (match.mask->src || match.mask->dst)
2437 *match_level = MLX5_MATCH_L4;
2440 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2441 struct flow_match_tcp match;
2443 flow_rule_match_tcp(rule, &match);
2444 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2445 ntohs(match.mask->flags));
2446 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2447 ntohs(match.key->flags));
2449 if (match.mask->flags)
2450 *match_level = MLX5_MATCH_L4;
2451 }
2453 return 0;
2454 }
2456 static int parse_cls_flower(struct mlx5e_priv *priv,
2457 struct mlx5e_tc_flow *flow,
2458 struct mlx5_flow_spec *spec,
2459 struct flow_cls_offload *f,
2460 struct net_device *filter_dev)
2461 {
2462 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2463 struct netlink_ext_ack *extack = f->common.extack;
2464 struct mlx5_core_dev *dev = priv->mdev;
2465 struct mlx5_eswitch *esw = dev->priv.eswitch;
2466 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2467 struct mlx5_eswitch_rep *rep;
2468 bool is_eswitch_flow;
2469 int err;
2471 inner_match_level = MLX5_MATCH_NONE;
2472 outer_match_level = MLX5_MATCH_NONE;
2474 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2475 &inner_match_level, &outer_match_level);
2476 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2477 outer_match_level : inner_match_level;
2479 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2480 if (!err && is_eswitch_flow) {
2481 rep = rpriv->rep;
2482 if (rep->vport != MLX5_VPORT_UPLINK &&
2483 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2484 esw->offloads.inline_mode < non_tunnel_match_level)) {
2485 NL_SET_ERR_MSG_MOD(extack,
2486 "Flow is not offloaded due to min inline setting");
2487 netdev_warn(priv->netdev,
2488 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2489 non_tunnel_match_level, esw->offloads.inline_mode);
2490 return -EOPNOTSUPP;
2491 }
2492 }
2494 if (is_eswitch_flow) {
2495 flow->esw_attr->inner_match_level = inner_match_level;
2496 flow->esw_attr->outer_match_level = outer_match_level;
2497 } else {
2498 flow->nic_attr->match_level = non_tunnel_match_level;
2499 }
2501 return err;
2502 }
2504 struct pedit_headers {
2505 struct ethhdr eth;
2506 struct vlan_hdr vlan;
2507 struct iphdr ip4;
2508 struct ipv6hdr ip6;
2509 struct tcphdr tcp;
2510 struct udphdr udp;
2511 };
2513 struct pedit_headers_action {
2514 struct pedit_headers vals;
2515 struct pedit_headers masks;
2516 u32 pedits;
2517 };
2519 static int pedit_header_offsets[] = {
2520 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2521 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2522 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2523 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2524 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2525 };
2527 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2529 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2530 struct pedit_headers_action *hdrs)
2531 {
2532 u32 *curr_pmask, *curr_pval;
2534 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2535 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2537 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2538 return -EEXIST;
2540 *curr_pmask |= mask;
2541 *curr_pval |= (val & mask);
2543 return 0;
2544 }
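/* Worked example (sketch, not part of the original flow): two pedit keys
 * that both touch byte 8 of the IPv4 header resolve via pedit_header() to
 * the same word of hdrs->masks.ip4. The first set_pedit_val() call records
 * the bits in *curr_pmask, so the second call sees the overlap in the
 * "disallow acting twice" test above and fails with -EEXIST instead of
 * silently merging the two rewrites.
 */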
2549 struct mlx5_fields {
2550 u8 field;
2551 u8 field_bsize;
2552 u32 field_mask;
2553 u32 offset;
2554 u32 match_offset;
2555 };
2557 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2558 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2559 offsetof(struct pedit_headers, field) + (off), \
2560 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2562 /* masked values are the same and there are no rewrites that do not have a
2563 * corresponding match.
2564 */
2565 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2566 type matchmaskx = *(type *)(matchmaskp); \
2567 type matchvalx = *(type *)(matchvalp); \
2568 type maskx = *(type *)(maskp); \
2569 type valx = *(type *)(valp); \
2571 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2572 matchmaskx)); \
2573 })
2575 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2576 void *matchmaskp, u8 bsize)
2577 {
2578 bool same = false;
2580 switch (bsize) {
2581 case 8:
2582 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2583 break;
2584 case 16:
2585 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2586 break;
2587 case 32:
2588 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2589 break;
2590 }
2592 return same;
2593 }
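/* Worked example for SAME_VAL_MASK()/cmp_val_mask() (illustrative): with a
 * match on ip_dscp value 0x0a under mask 0xfc and a pedit that sets the
 * same field to 0x0a with mask 0xfc,
 *   (0x0a & 0xfc) == (0x0a & 0xfc) and 0xfc & (0xfc ^ 0xfc) == 0,
 * so the macro yields true and offload_pedit_fields() skips the redundant
 * rewrite. If the pedit mask covered bits the match does not (e.g. pedit
 * mask 0xff vs. match mask 0xfc), maskx & (maskx ^ matchmaskx) is non-zero
 * and the rewrite is kept.
 */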
2595 static struct mlx5_fields fields[] = {
2596 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2597 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2598 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2599 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2600 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2601 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2603 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2604 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2605 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2606 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2608 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2609 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2610 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2611 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2612 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2613 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2614 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2615 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2616 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2617 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2618 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2619 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2620 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2621 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2622 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2623 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2624 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2626 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2627 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2628 /* in the linux tcphdr, the flags field is 8 bits long */
2629 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
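/* Note on the entry above (added for clarity): in struct tcphdr, ack_seq
 * starts at byte offset 8 and the flags byte sits at offset 13, so
 * "ack_seq + 5" addresses the flags byte without adding a dedicated
 * member to struct pedit_headers.
 */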
2631 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2632 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2633 };
2635 static int offload_pedit_fields(struct mlx5e_priv *priv,
2636 int namespace,
2637 struct pedit_headers_action *hdrs,
2638 struct mlx5e_tc_flow_parse_attr *parse_attr,
2639 u32 *action_flags,
2640 struct netlink_ext_ack *extack)
2641 {
2642 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2643 int i, action_size, first, last, next_z;
2644 void *headers_c, *headers_v, *action, *vals_p;
2645 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2646 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2647 struct mlx5_fields *f;
2648 unsigned long mask;
2649 __be32 mask_be32;
2650 __be16 mask_be16;
2651 int err;
2652 u8 cmd;
2654 mod_acts = &parse_attr->mod_hdr_acts;
2655 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2656 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2658 set_masks = &hdrs[0].masks;
2659 add_masks = &hdrs[1].masks;
2660 set_vals = &hdrs[0].vals;
2661 add_vals = &hdrs[1].vals;
2663 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2665 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2666 bool skip;
2668 f = &fields[i];
2669 /* avoid seeing bits set from previous iterations */
2670 s_mask = 0;
2671 a_mask = 0;
2673 s_masks_p = (void *)set_masks + f->offset;
2674 a_masks_p = (void *)add_masks + f->offset;
2676 s_mask = *s_masks_p & f->field_mask;
2677 a_mask = *a_masks_p & f->field_mask;
2679 if (!s_mask && !a_mask) /* nothing to offload here */
2680 continue;
2682 if (s_mask && a_mask) {
2683 NL_SET_ERR_MSG_MOD(extack,
2684 "can't set and add to the same HW field");
2685 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2686 return -EOPNOTSUPP;
2687 }
2689 skip = false;
2690 if (s_mask) {
2691 void *match_mask = headers_c + f->match_offset;
2692 void *match_val = headers_v + f->match_offset;
2694 cmd = MLX5_ACTION_TYPE_SET;
2695 mask = s_mask;
2696 vals_p = (void *)set_vals + f->offset;
2697 /* don't rewrite if we have a match on the same value */
2698 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2699 match_mask, f->field_bsize))
2700 skip = true;
2701 /* clear to denote we consumed this field */
2702 *s_masks_p &= ~f->field_mask;
2703 } else {
2704 cmd = MLX5_ACTION_TYPE_ADD;
2705 mask = a_mask;
2706 vals_p = (void *)add_vals + f->offset;
2707 /* add 0 is no change */
2708 if ((*(u32 *)vals_p & f->field_mask) == 0)
2709 skip = true;
2710 /* clear to denote we consumed this field */
2711 *a_masks_p &= ~f->field_mask;
2712 }
2713 if (skip)
2714 continue;
2716 if (f->field_bsize == 32) {
2717 mask_be32 = (__be32)mask;
2718 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2719 } else if (f->field_bsize == 16) {
2720 mask_be32 = (__be32)mask;
2721 mask_be16 = *(__be16 *)&mask_be32;
2722 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2723 }
2725 first = find_first_bit(&mask, f->field_bsize);
2726 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2727 last = find_last_bit(&mask, f->field_bsize);
2728 if (first < next_z && next_z < last) {
2729 NL_SET_ERR_MSG_MOD(extack,
2730 "rewrite of non-contiguous sub-fields isn't supported");
2731 printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
2732 mask);
2733 return -EOPNOTSUPP;
2734 }
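/* Worked example (illustrative): mask 0x00f0 gives first=4, next_z=8,
 * last=7, so the test above passes and the rewrite is emitted with
 * offset 4 and length 4. mask 0x0101 gives first=0, next_z=1, last=8,
 * i.e. a hole inside the written bits, and is rejected.
 */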
2736 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2737 if (err) {
2738 NL_SET_ERR_MSG_MOD(extack,
2739 "too many pedit actions, can't offload");
2740 mlx5_core_warn(priv->mdev,
2741 "mlx5: parsed %d pedit actions, can't do more\n",
2742 mod_acts->num_actions);
2743 return err;
2744 }
2746 action = mod_acts->actions +
2747 (mod_acts->num_actions * action_size);
2748 MLX5_SET(set_action_in, action, action_type, cmd);
2749 MLX5_SET(set_action_in, action, field, f->field);
2751 if (cmd == MLX5_ACTION_TYPE_SET) {
2752 int start;
2754 /* if the field is bit sized it can start not from the first bit */
2755 start = find_first_bit((unsigned long *)&f->field_mask,
2756 f->field_bsize);
2758 MLX5_SET(set_action_in, action, offset, first - start);
2759 /* length is num of bits to be written, zero means length of 32 */
2760 MLX5_SET(set_action_in, action, length, (last - first + 1));
2761 }
2763 if (f->field_bsize == 32)
2764 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2765 else if (f->field_bsize == 16)
2766 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2767 else if (f->field_bsize == 8)
2768 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2770 ++mod_acts->num_actions;
2771 }
2773 return 0;
2774 }
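/* Example of a resulting action (sketch): an IPv4 "set ttl 64" pedit
 * compiles to one set_action_in entry with action_type=SET,
 * field=MLX5_ACTION_IN_FIELD_OUT_IP_TTL, offset=0, length=8, data=64,
 * since the IP_TTL entry above is 8 bits wide with a full U8_MAX mask.
 */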
2776 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2777 int namespace)
2778 {
2779 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2780 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2781 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2782 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2785 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2786 int namespace,
2787 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2788 {
2789 int action_size, new_num_actions, max_hw_actions;
2790 size_t new_sz, old_sz;
2791 void *ret;
2793 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2794 return 0;
2796 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2798 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2799 namespace);
2800 new_num_actions = min(max_hw_actions,
2801 mod_hdr_acts->actions ?
2802 mod_hdr_acts->max_actions * 2 : 1);
2803 if (mod_hdr_acts->max_actions == new_num_actions)
2804 return -ENOSPC;
2806 new_sz = action_size * new_num_actions;
2807 old_sz = mod_hdr_acts->max_actions * action_size;
2808 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2809 if (!ret)
2810 return -ENOMEM;
2812 memset(ret + old_sz, 0, new_sz - old_sz);
2813 mod_hdr_acts->actions = ret;
2814 mod_hdr_acts->max_actions = new_num_actions;
2816 return 0;
2817 }
2819 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2820 {
2821 kfree(mod_hdr_acts->actions);
2822 mod_hdr_acts->actions = NULL;
2823 mod_hdr_acts->num_actions = 0;
2824 mod_hdr_acts->max_actions = 0;
2825 }
2827 static const struct pedit_headers zero_masks = {};
2829 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2830 const struct flow_action_entry *act, int namespace,
2831 struct pedit_headers_action *hdrs,
2832 struct netlink_ext_ack *extack)
2834 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2835 int err = -EOPNOTSUPP;
2836 u32 mask, val, offset;
2837 u8 htype;
2839 htype = act->mangle.htype;
2840 err = -EOPNOTSUPP; /* can't be all optimistic */
2842 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2843 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2844 goto out_err;
2845 }
2847 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2848 NL_SET_ERR_MSG_MOD(extack,
2849 "The pedit offload action is not supported");
2850 goto out_err;
2851 }
2853 mask = act->mangle.mask;
2854 val = act->mangle.val;
2855 offset = act->mangle.offset;
2857 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2858 if (err)
2859 goto out_err;
2861 hdrs[cmd].pedits++;
2863 return 0;
2864 out_err:
2865 return err;
2866 }
2868 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2869 struct mlx5e_tc_flow_parse_attr *parse_attr,
2870 struct pedit_headers_action *hdrs,
2871 u32 *action_flags,
2872 struct netlink_ext_ack *extack)
2873 {
2874 struct pedit_headers *cmd_masks;
2875 int err;
2876 u8 cmd;
2878 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2879 action_flags, extack);
2880 if (err < 0)
2881 goto out_dealloc_parsed_actions;
2883 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2884 cmd_masks = &hdrs[cmd].masks;
2885 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2886 NL_SET_ERR_MSG_MOD(extack,
2887 "attempt to offload an unsupported field");
2888 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2889 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2890 16, 1, cmd_masks, sizeof(zero_masks), true);
2891 err = -EOPNOTSUPP;
2892 goto out_dealloc_parsed_actions;
2893 }
2894 }
2896 return 0;
2898 out_dealloc_parsed_actions:
2899 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2900 return err;
2901 }
2903 static bool csum_offload_supported(struct mlx5e_priv *priv,
2904 u32 action,
2905 u32 update_flags,
2906 struct netlink_ext_ack *extack)
2907 {
2908 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2909 TCA_CSUM_UPDATE_FLAG_UDP;
2911 /* The HW recalcs checksums only if re-writing headers */
2912 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2913 NL_SET_ERR_MSG_MOD(extack,
2914 "TC csum action is only offloaded with pedit");
2915 netdev_warn(priv->netdev,
2916 "TC csum action is only offloaded with pedit\n");
2920 if (update_flags & ~prot_flags) {
2921 NL_SET_ERR_MSG_MOD(extack,
2922 "can't offload TC csum action for some header/s");
2923 netdev_warn(priv->netdev,
2924 "can't offload TC csum action for some header/s - flags %#x\n",
2932 struct ip_ttl_word {
2933 __u8 ttl;
2934 __u8 protocol;
2935 __sum16 check;
2936 };
2938 struct ipv6_hoplimit_word {
2939 __be16 payload_len;
2940 __u8 nexthdr;
2941 __u8 hop_limit;
2942 };
2944 static int is_action_keys_supported(const struct flow_action_entry *act,
2945 bool ct_flow, bool *modify_ip_header,
2946 struct netlink_ext_ack *extack)
2947 {
2948 u32 mask, offset;
2949 u8 htype;
2951 htype = act->mangle.htype;
2952 offset = act->mangle.offset;
2953 mask = ~act->mangle.mask;
2954 /* For the IPv4 & IPv6 headers, check the whole 4-byte word
2955 * to determine whether the modified fields
2956 * are anything other than ttl & hop_limit.
2957 */
2958 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2959 struct ip_ttl_word *ttl_word =
2960 (struct ip_ttl_word *)&mask;
2962 if (offset != offsetof(struct iphdr, ttl) ||
2963 ttl_word->protocol ||
2964 ttl_word->check) {
2965 *modify_ip_header = true;
2966 }
2968 if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
2969 NL_SET_ERR_MSG_MOD(extack,
2970 "can't offload re-write of ipv4 address with action ct");
2971 return -EOPNOTSUPP;
2972 }
2973 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2974 struct ipv6_hoplimit_word *hoplimit_word =
2975 (struct ipv6_hoplimit_word *)&mask;
2977 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2978 hoplimit_word->payload_len ||
2979 hoplimit_word->nexthdr) {
2980 *modify_ip_header = true;
2981 }
2983 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
2984 NL_SET_ERR_MSG_MOD(extack,
2985 "can't offload re-write of ipv6 address with action ct");
2986 return -EOPNOTSUPP;
2987 }
2988 } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
2989 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
2990 NL_SET_ERR_MSG_MOD(extack,
2991 "can't offload re-write of transport header ports with action ct");
2998 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2999 struct flow_action *flow_action,
3000 u32 actions, bool ct_flow,
3001 struct netlink_ext_ack *extack)
3003 const struct flow_action_entry *act;
3004 bool modify_ip_header;
3005 void *headers_v;
3006 u16 ethertype;
3007 u8 ip_proto;
3008 int i, err;
3010 headers_v = get_match_headers_value(actions, spec);
3011 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3013 /* for non-IP we only re-write MACs, so we're okay */
3014 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3015 return true;
3017 modify_ip_header = false;
3018 flow_action_for_each(i, act, flow_action) {
3019 if (act->id != FLOW_ACTION_MANGLE &&
3020 act->id != FLOW_ACTION_ADD)
3023 err = is_action_keys_supported(act, ct_flow,
3024 &modify_ip_header, extack);
3029 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3030 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3031 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3032 NL_SET_ERR_MSG_MOD(extack,
3033 "can't offload re-write of non TCP/UDP");
3034 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
3042 static bool actions_match_supported(struct mlx5e_priv *priv,
3043 struct flow_action *flow_action,
3044 struct mlx5e_tc_flow_parse_attr *parse_attr,
3045 struct mlx5e_tc_flow *flow,
3046 struct netlink_ext_ack *extack)
3047 {
3048 u32 actions;
3049 bool ct_flow;
3051 ct_flow = flow_flag_test(flow, CT);
3052 if (mlx5e_is_eswitch_flow(flow)) {
3053 actions = flow->esw_attr->action;
3055 if (flow->esw_attr->split_count && ct_flow) {
3056 /* All registers used by ct are cleared when using
3057 * split rules.
3058 */
3059 NL_SET_ERR_MSG_MOD(extack,
3060 "Can't offload mirroring with action ct");
3061 return false;
3062 }
3063 } else {
3064 actions = flow->nic_attr->action;
3065 }
3067 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3068 return modify_header_match_supported(&parse_attr->spec,
3069 flow_action, actions,
3070 ct_flow, extack);
3072 return true;
3073 }
3075 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3077 struct mlx5_core_dev *fmdev, *pmdev;
3078 u64 fsystem_guid, psystem_guid;
3080 fmdev = priv->mdev;
3081 pmdev = peer_priv->mdev;
3083 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3084 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3086 return (fsystem_guid == psystem_guid);
3087 }
3089 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3090 const struct flow_action_entry *act,
3091 struct mlx5e_tc_flow_parse_attr *parse_attr,
3092 struct pedit_headers_action *hdrs,
3093 u32 *action, struct netlink_ext_ack *extack)
3095 u16 mask16 = VLAN_VID_MASK;
3096 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3097 const struct flow_action_entry pedit_act = {
3098 .id = FLOW_ACTION_MANGLE,
3099 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3100 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3101 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3102 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3103 };
3104 u8 match_prio_mask, match_prio_val;
3105 void *headers_c, *headers_v;
3108 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3109 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3111 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3112 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3113 NL_SET_ERR_MSG_MOD(extack,
3114 "VLAN rewrite action must have VLAN protocol match");
3118 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3119 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3120 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3121 NL_SET_ERR_MSG_MOD(extack,
3122 "Changing VLAN prio is not supported");
3126 err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
3127 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3133 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3134 struct mlx5e_tc_flow_parse_attr *parse_attr,
3135 struct pedit_headers_action *hdrs,
3136 u32 *action, struct netlink_ext_ack *extack)
3138 const struct flow_action_entry prio_tag_act = {
3139 .vlan.vid = 0,
3140 .vlan.prio =
3141 MLX5_GET(fte_match_set_lyr_2_4,
3142 get_match_headers_value(*action,
3143 &parse_attr->spec),
3144 first_prio) &
3145 MLX5_GET(fte_match_set_lyr_2_4,
3146 get_match_headers_criteria(*action,
3147 &parse_attr->spec),
3148 first_prio),
3149 };
3151 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3152 &prio_tag_act, parse_attr, hdrs, action,
3153 extack);
3154 }
3156 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3157 struct flow_action *flow_action,
3158 struct mlx5e_tc_flow_parse_attr *parse_attr,
3159 struct mlx5e_tc_flow *flow,
3160 struct netlink_ext_ack *extack)
3162 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
3163 struct pedit_headers_action hdrs[2] = {};
3164 const struct flow_action_entry *act;
3165 u32 action = 0;
3166 int err, i;
3168 if (!flow_action_has_entries(flow_action))
3169 return -EINVAL;
3171 if (!flow_action_hw_stats_check(flow_action, extack,
3172 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3173 return -EOPNOTSUPP;
3175 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3177 flow_action_for_each(i, act, flow_action) {
3178 switch (act->id) {
3179 case FLOW_ACTION_ACCEPT:
3180 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3181 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3182 break;
3183 case FLOW_ACTION_DROP:
3184 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3185 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3186 flow_table_properties_nic_receive.flow_counter))
3187 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3188 break;
3189 case FLOW_ACTION_MANGLE:
3190 case FLOW_ACTION_ADD:
3191 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3192 hdrs, extack);
3193 if (err)
3194 return err;
3196 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
3197 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3198 break;
3199 case FLOW_ACTION_VLAN_MANGLE:
3200 err = add_vlan_rewrite_action(priv,
3201 MLX5_FLOW_NAMESPACE_KERNEL,
3202 act, parse_attr, hdrs,
3203 &action, extack);
3204 if (err)
3205 return err;
3207 break;
3208 case FLOW_ACTION_CSUM:
3209 if (csum_offload_supported(priv, action,
3210 act->csum_flags,
3211 extack))
3212 break;
3214 return -EOPNOTSUPP;
3215 case FLOW_ACTION_REDIRECT: {
3216 struct net_device *peer_dev = act->dev;
3218 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3219 same_hw_devs(priv, netdev_priv(peer_dev))) {
3220 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3221 flow_flag_set(flow, HAIRPIN);
3222 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3223 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3224 } else {
3225 NL_SET_ERR_MSG_MOD(extack,
3226 "device is not on same HW, can't offload");
3227 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3228 peer_dev->name);
3229 return -EINVAL;
3230 }
3231 }
3232 break;
3233 case FLOW_ACTION_MARK: {
3234 u32 mark = act->mark;
3236 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3237 NL_SET_ERR_MSG_MOD(extack,
3238 "Bad flow mark - only 16 bit is supported");
3242 attr->flow_tag = mark;
3243 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3246 default:
3247 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3248 return -EOPNOTSUPP;
3249 }
3250 }
3252 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3253 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3254 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3255 parse_attr, hdrs, &action, extack);
3256 if (err)
3257 return err;
3258 /* in case all pedit actions are skipped, remove the MOD_HDR
3259 * flag.
3260 */
3261 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3262 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3263 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3264 }
3265 }
3267 attr->action = action;
3268 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3269 return -EOPNOTSUPP;
3271 return 0;
3272 }
3274 struct encap_key {
3275 const struct ip_tunnel_key *ip_tun_key;
3276 struct mlx5e_tc_tunnel *tc_tunnel;
3277 };
3279 static inline int cmp_encap_info(struct encap_key *a,
3280 struct encap_key *b)
3282 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3283 a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3286 static inline int hash_encap_info(struct encap_key *key)
3287 {
3288 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3289 key->tc_tunnel->tunnel_type);
3290 }
3293 static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
3294 struct net_device *peer_netdev)
3296 struct mlx5e_priv *peer_priv;
3298 peer_priv = netdev_priv(peer_netdev);
3300 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3301 mlx5e_eswitch_rep(priv->netdev) &&
3302 mlx5e_eswitch_rep(peer_netdev) &&
3303 same_hw_devs(priv, peer_priv));
3308 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3310 return refcount_inc_not_zero(&e->refcnt);
3313 static struct mlx5e_encap_entry *
3314 mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3315 uintptr_t hash_key)
3316 {
3317 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3318 struct mlx5e_encap_entry *e;
3319 struct encap_key e_key;
3321 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3322 encap_hlist, hash_key) {
3323 e_key.ip_tun_key = &e->tun_info->key;
3324 e_key.tc_tunnel = e->tunnel;
3325 if (!cmp_encap_info(&e_key, key) &&
3326 mlx5e_encap_take(e))
3327 return e;
3328 }
3330 return NULL;
3331 }
3333 static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
3335 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
3337 return kmemdup(tun_info, tun_size, GFP_KERNEL);
3340 static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3341 struct mlx5e_tc_flow *flow,
3342 int out_index,
3343 struct mlx5e_encap_entry *e,
3344 struct netlink_ext_ack *extack)
3345 {
3346 int i;
3348 for (i = 0; i < out_index; i++) {
3349 if (flow->encaps[i].e != e)
3350 continue;
3351 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3352 netdev_err(priv->netdev, "can't duplicate encap action\n");
3353 return true;
3354 }
3356 return false;
3357 }
3359 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3360 struct mlx5e_tc_flow *flow,
3361 struct net_device *mirred_dev,
3362 int out_index,
3363 struct netlink_ext_ack *extack,
3364 struct net_device **encap_dev,
3365 bool *encap_valid)
3366 {
3367 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3368 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3369 struct mlx5e_tc_flow_parse_attr *parse_attr;
3370 const struct ip_tunnel_info *tun_info;
3371 struct encap_key key;
3372 struct mlx5e_encap_entry *e;
3373 unsigned short family;
3374 uintptr_t hash_key;
3375 int err = 0;
3377 parse_attr = attr->parse_attr;
3378 tun_info = parse_attr->tun_info[out_index];
3379 family = ip_tunnel_info_af(tun_info);
3380 key.ip_tun_key = &tun_info->key;
3381 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3382 if (!key.tc_tunnel) {
3383 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3384 return -EOPNOTSUPP;
3385 }
3387 hash_key = hash_encap_info(&key);
3389 mutex_lock(&esw->offloads.encap_tbl_lock);
3390 e = mlx5e_encap_get(priv, &key, hash_key);
3392 /* must verify if encap is valid or not */
3393 if (e) {
3394 /* Check that entry was not already attached to this flow */
3395 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3396 err = -EOPNOTSUPP;
3397 goto out_err;
3398 }
3400 mutex_unlock(&esw->offloads.encap_tbl_lock);
3401 wait_for_completion(&e->res_ready);
3403 /* Protect against concurrent neigh update. */
3404 mutex_lock(&esw->offloads.encap_tbl_lock);
3405 if (e->compl_result < 0) {
3406 err = -EREMOTEIO;
3407 goto out_err;
3408 }
3409 goto attach_flow;
3410 }
3412 e = kzalloc(sizeof(*e), GFP_KERNEL);
3413 if (!e) {
3414 err = -ENOMEM;
3415 goto out_err;
3416 }
3418 refcount_set(&e->refcnt, 1);
3419 init_completion(&e->res_ready);
3421 tun_info = dup_tun_info(tun_info);
3422 if (!tun_info) {
3423 err = -ENOMEM;
3424 goto out_err_init;
3425 }
3426 e->tun_info = tun_info;
3427 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3428 if (err)
3429 goto out_err_init;
3431 INIT_LIST_HEAD(&e->flows);
3432 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3433 mutex_unlock(&esw->offloads.encap_tbl_lock);
3435 if (family == AF_INET)
3436 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3437 else if (family == AF_INET6)
3438 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3440 /* Protect against concurrent neigh update. */
3441 mutex_lock(&esw->offloads.encap_tbl_lock);
3442 complete_all(&e->res_ready);
3443 if (err) {
3444 e->compl_result = err;
3445 goto out_err;
3446 }
3447 e->compl_result = 1;
3449 attach_flow:
3450 flow->encaps[out_index].e = e;
3451 list_add(&flow->encaps[out_index].list, &e->flows);
3452 flow->encaps[out_index].index = out_index;
3453 *encap_dev = e->out_dev;
3454 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3455 attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3456 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3457 *encap_valid = true;
3458 } else {
3459 *encap_valid = false;
3460 }
3461 mutex_unlock(&esw->offloads.encap_tbl_lock);
3463 return err;
3465 out_err:
3466 mutex_unlock(&esw->offloads.encap_tbl_lock);
3467 if (e)
3468 mlx5e_encap_put(priv, e);
3469 return err;
3471 out_err_init:
3472 mutex_unlock(&esw->offloads.encap_tbl_lock);
3473 kfree(tun_info);
3474 kfree(e);
3475 return err;
3476 }
3478 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3479 const struct flow_action_entry *act,
3480 struct mlx5_esw_flow_attr *attr,
3483 u8 vlan_idx = attr->total_vlan;
3485 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3486 return -EOPNOTSUPP;
3488 switch (act->id) {
3489 case FLOW_ACTION_VLAN_POP:
3490 if (vlan_idx) {
3491 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3492 MLX5_FS_VLAN_DEPTH))
3493 return -EOPNOTSUPP;
3495 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3496 } else {
3497 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3498 }
3499 break;
3500 case FLOW_ACTION_VLAN_PUSH:
3501 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3502 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3503 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3504 if (!attr->vlan_proto[vlan_idx])
3505 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3508 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3509 MLX5_FS_VLAN_DEPTH))
3512 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3514 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3515 (act->vlan.proto != htons(ETH_P_8021Q) ||
3519 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3526 attr->total_vlan = vlan_idx + 1;
3531 static int add_vlan_push_action(struct mlx5e_priv *priv,
3532 struct mlx5_esw_flow_attr *attr,
3533 struct net_device **out_dev,
3536 struct net_device *vlan_dev = *out_dev;
3537 struct flow_action_entry vlan_act = {
3538 .id = FLOW_ACTION_VLAN_PUSH,
3539 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3540 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3541 .vlan.prio = 0,
3542 };
3543 int err;
3545 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3546 if (err)
3547 return err;
3549 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3550 dev_get_iflink(vlan_dev));
3551 if (is_vlan_dev(*out_dev))
3552 err = add_vlan_push_action(priv, attr, out_dev, action);
3554 return err;
3555 }
3557 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3558 struct mlx5_esw_flow_attr *attr,
3559 u32 *action)
3560 {
3561 int nest_level = attr->parse_attr->filter_dev->lower_level;
3562 struct flow_action_entry vlan_act = {
3563 .id = FLOW_ACTION_VLAN_POP,
3564 };
3565 int err = 0;
3567 while (nest_level--) {
3568 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3569 if (err)
3570 return err;
3571 }
3573 return err;
3574 }
3576 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3577 struct net_device *out_dev)
3579 if (is_merged_eswitch_dev(priv, out_dev))
3580 return true;
3582 return mlx5e_eswitch_rep(out_dev) &&
3583 same_hw_devs(priv, netdev_priv(out_dev));
3584 }
3586 static bool is_duplicated_output_device(struct net_device *dev,
3587 struct net_device *out_dev,
3588 int *ifindexes, int if_count,
3589 struct netlink_ext_ack *extack)
3593 for (i = 0; i < if_count; i++) {
3594 if (ifindexes[i] == out_dev->ifindex) {
3595 NL_SET_ERR_MSG_MOD(extack,
3596 "can't duplicate output to same device");
3597 netdev_err(dev, "can't duplicate output to same device: %s\n",
3598 out_dev->name);
3599 return true;
3600 }
3601 }
3603 return false;
3604 }
3606 static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
3607 struct mlx5e_tc_flow *flow,
3608 const struct flow_action_entry *act,
3609 u32 actions,
3610 struct netlink_ext_ack *extack)
3611 {
3612 u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
3613 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3614 bool ft_flow = mlx5e_is_ft_flow(flow);
3615 u32 dest_chain = act->chain_index;
3617 if (ft_flow) {
3618 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3619 return -EOPNOTSUPP;
3620 }
3622 if (!mlx5_esw_chains_backwards_supported(esw) &&
3623 dest_chain <= attr->chain) {
3624 NL_SET_ERR_MSG_MOD(extack,
3625 "Goto lower numbered chain isn't supported");
3628 if (dest_chain > max_chain) {
3629 NL_SET_ERR_MSG_MOD(extack,
3630 "Requested destination chain is out of supported range");
3634 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3635 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3636 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
3637 NL_SET_ERR_MSG_MOD(extack,
3638 "Goto chain is not allowed if action has reformat or decap");
3645 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3646 struct mlx5e_tc_flow *flow,
3647 struct net_device *out_dev,
3648 struct netlink_ext_ack *extack)
3650 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3651 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3652 struct mlx5e_rep_priv *rep_priv;
3654 /* Forwarding non-encapsulated traffic between
3655 * uplink ports is allowed only if
3656 * termination_table_raw_traffic cap is set.
3658 * The input vport is stored in esw_attr->in_rep.
3659 * In the LAG case, *priv* is the private data of
3660 * the uplink, which may not be the input vport.
3661 */
3662 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3664 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3665 mlx5e_eswitch_uplink_rep(out_dev)))
3666 return 0;
3668 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3669 termination_table_raw_traffic)) {
3670 NL_SET_ERR_MSG_MOD(extack,
3671 "devices are both uplink, can't offload forwarding");
3672 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3673 priv->netdev->name, out_dev->name);
3675 } else if (out_dev != rep_priv->netdev) {
3676 NL_SET_ERR_MSG_MOD(extack,
3677 "devices are not the same uplink, can't offload forwarding");
3678 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3679 priv->netdev->name, out_dev->name);
3685 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3686 struct flow_action *flow_action,
3687 struct mlx5e_tc_flow *flow,
3688 struct netlink_ext_ack *extack)
3690 struct pedit_headers_action hdrs[2] = {};
3691 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3692 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3693 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3694 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3695 const struct ip_tunnel_info *info = NULL;
3696 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3697 bool ft_flow = mlx5e_is_ft_flow(flow);
3698 const struct flow_action_entry *act;
3699 bool encap = false, decap = false;
3700 u32 action = attr->action;
3701 int err, i, if_count = 0;
3703 if (!flow_action_has_entries(flow_action))
3704 return -EINVAL;
3706 if (!flow_action_hw_stats_check(flow_action, extack,
3707 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3708 return -EOPNOTSUPP;
3710 flow_action_for_each(i, act, flow_action) {
3711 switch (act->id) {
3712 case FLOW_ACTION_DROP:
3713 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3714 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3715 break;
3716 case FLOW_ACTION_MANGLE:
3717 case FLOW_ACTION_ADD:
3718 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3719 hdrs, extack);
3720 if (err)
3721 return err;
3723 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3724 attr->split_count = attr->out_count;
3725 break;
3726 case FLOW_ACTION_CSUM:
3727 if (csum_offload_supported(priv, action,
3728 act->csum_flags, extack))
3729 break;
3731 return -EOPNOTSUPP;
3732 case FLOW_ACTION_REDIRECT:
3733 case FLOW_ACTION_MIRRED: {
3734 struct mlx5e_priv *out_priv;
3735 struct net_device *out_dev;
3737 out_dev = act->dev;
3738 if (!out_dev) {
3739 /* out_dev is NULL when filters with
3740 * non-existing mirred device are replayed to
3741 * the driver.
3742 */
3743 return -EINVAL;
3744 }
3746 if (ft_flow && out_dev == priv->netdev) {
3747 /* Ignore forward to self rules generated
3748 * by adding both mlx5 devs to the flow table
3749 * block on a normal nft offload setup.
3750 */
3751 return -EOPNOTSUPP;
3752 }
3754 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3755 NL_SET_ERR_MSG_MOD(extack,
3756 "can't support more output ports, can't offload forwarding");
3757 netdev_warn(priv->netdev,
3758 "can't support more than %d output ports, can't offload forwarding\n",
3763 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3764 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3765 if (encap) {
3766 parse_attr->mirred_ifindex[attr->out_count] =
3767 out_dev->ifindex;
3768 parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
3769 if (!parse_attr->tun_info[attr->out_count])
3770 return -ENOMEM;
3771 encap = false;
3772 attr->dests[attr->out_count].flags |=
3773 MLX5_ESW_DEST_ENCAP;
3774 attr->out_count++;
3775 /* attr->dests[].rep is resolved when we
3776 * handle encap
3777 */
3778 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
3779 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3780 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3781 struct net_device *uplink_upper;
3783 if (is_duplicated_output_device(priv->netdev,
3784 out_dev,
3785 ifindexes,
3786 if_count,
3787 extack))
3788 return -EOPNOTSUPP;
3790 ifindexes[if_count] = out_dev->ifindex;
3791 if_count++;
3793 rcu_read_lock();
3794 uplink_upper =
3795 netdev_master_upper_dev_get_rcu(uplink_dev);
3796 if (uplink_upper &&
3797 netif_is_lag_master(uplink_upper) &&
3798 uplink_upper == out_dev)
3799 out_dev = uplink_dev;
3800 rcu_read_unlock();
3802 if (is_vlan_dev(out_dev)) {
3803 err = add_vlan_push_action(priv, attr,
3804 &out_dev,
3805 &action);
3806 if (err)
3807 return err;
3808 }
3810 if (is_vlan_dev(parse_attr->filter_dev)) {
3811 err = add_vlan_pop_action(priv, attr,
3812 &action);
3813 if (err)
3814 return err;
3815 }
3817 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3818 if (err)
3819 return err;
3821 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3822 NL_SET_ERR_MSG_MOD(extack,
3823 "devices are not on same switch HW, can't offload forwarding");
3824 netdev_warn(priv->netdev,
3825 "devices %s %s not on same switch HW, can't offload forwarding\n",
3831 out_priv = netdev_priv(out_dev);
3832 rpriv = out_priv->ppriv;
3833 attr->dests[attr->out_count].rep = rpriv->rep;
3834 attr->dests[attr->out_count].mdev = out_priv->mdev;
3836 } else if (parse_attr->filter_dev != priv->netdev) {
3837 /* All mlx5 devices are called to configure
3838 * high level device filters. Therefore, the
3839 * *attempt* to install a filter on invalid
3840 * eswitch should not trigger an explicit error
3844 NL_SET_ERR_MSG_MOD(extack,
3845 "devices are not on same switch HW, can't offload forwarding");
3846 netdev_warn(priv->netdev,
3847 "devices %s %s not on same switch HW, can't offload forwarding\n",
3848 priv->netdev->name,
3849 out_dev->name);
3850 return -EINVAL;
3851 }
3852 }
3853 break;
3854 case FLOW_ACTION_TUNNEL_ENCAP:
3855 info = act->tunnel;
3856 if (info)
3857 encap = true;
3858 else
3859 return -EOPNOTSUPP;
3861 break;
3862 case FLOW_ACTION_VLAN_PUSH:
3863 case FLOW_ACTION_VLAN_POP:
3864 if (act->id == FLOW_ACTION_VLAN_PUSH &&
3865 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
3866 /* Replace vlan pop+push with vlan modify */
3867 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3868 err = add_vlan_rewrite_action(priv,
3869 MLX5_FLOW_NAMESPACE_FDB,
3870 act, parse_attr, hdrs,
3871 &action, extack);
3872 } else {
3873 err = parse_tc_vlan_action(priv, act, attr, &action);
3874 }
3875 if (err)
3876 return err;
3878 attr->split_count = attr->out_count;
3879 break;
3880 case FLOW_ACTION_VLAN_MANGLE:
3881 err = add_vlan_rewrite_action(priv,
3882 MLX5_FLOW_NAMESPACE_FDB,
3883 act, parse_attr, hdrs,
3884 &action, extack);
3885 if (err)
3886 return err;
3888 attr->split_count = attr->out_count;
3889 break;
3890 case FLOW_ACTION_TUNNEL_DECAP:
3891 decap = true;
3892 break;
3893 case FLOW_ACTION_GOTO:
3894 err = mlx5_validate_goto_chain(esw, flow, act, action,
3895 extack);
3896 if (err)
3897 return err;
3899 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3900 attr->dest_chain = act->chain_index;
3901 break;
3902 case FLOW_ACTION_CT:
3903 err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
3904 if (err)
3905 return err;
3907 flow_flag_set(flow, CT);
3908 break;
3909 default:
3910 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3911 return -EOPNOTSUPP;
3912 }
3913 }
3915 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
3916 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
3917 /* For prio tag mode, replace vlan pop with a vlan prio
3918 * tag rewrite.
3919 */
3920 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3921 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
3922 &action, extack);
3923 if (err)
3924 return err;
3925 }
3927 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3928 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3929 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3930 parse_attr, hdrs, &action, extack);
3931 if (err)
3932 return err;
3933 /* in case all pedit actions are skipped, remove the MOD_HDR
3934 * flag. we might have set split_count either by pedit or
3935 * pop/push. if there is no pop/push either, reset it too.
3937 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3938 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3939 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3940 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3941 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3942 attr->split_count = 0;
3946 attr->action = action;
3947 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3948 return -EOPNOTSUPP;
3950 if (attr->dest_chain) {
3951 if (decap) {
3952 /* It can be supported if we'll create a mapping for
3953 * the tunnel device only (without tunnel), and set
3954 * this tunnel id with this decap flow.
3956 * On restore (miss), we'll just set this saved tunnel
3957 * device.
3958 */
3960 NL_SET_ERR_MSG(extack,
3961 "Decap with goto isn't supported");
3962 netdev_warn(priv->netdev,
3963 "Decap with goto isn't supported");
3964 return -EOPNOTSUPP;
3965 }
3967 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3968 NL_SET_ERR_MSG_MOD(extack,
3969 "Mirroring goto chain rules isn't supported");
3972 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3975 if (!(attr->action &
3976 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3977 NL_SET_ERR_MSG_MOD(extack,
3978 "Rule must have at least one forward/drop action");
3982 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3983 NL_SET_ERR_MSG_MOD(extack,
3984 "current firmware doesn't support split rule for port mirroring");
3985 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
3986 return -EOPNOTSUPP;
3987 }
3989 return 0;
3990 }
3992 static void get_flags(int flags, unsigned long *flow_flags)
3994 unsigned long __flow_flags = 0;
3996 if (flags & MLX5_TC_FLAG(INGRESS))
3997 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3998 if (flags & MLX5_TC_FLAG(EGRESS))
3999 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4001 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4002 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4003 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4004 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4005 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4006 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4008 *flow_flags = __flow_flags;
4011 static const struct rhashtable_params tc_ht_params = {
4012 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4013 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4014 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4015 .automatic_shrinking = true,
4016 };
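/* Illustrative note: flows are keyed purely by the tc cookie, so the
 * configure/delete/stats entry points below all resolve a flow with
 * rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params) under RCU.
 */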
4018 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4019 unsigned long flags)
4021 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4022 struct mlx5e_rep_priv *uplink_rpriv;
4024 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4025 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4026 return &uplink_rpriv->uplink_priv.tc_ht;
4027 } else /* NIC offload */
4028 return &priv->fs.tc.ht;
4031 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4033 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
4034 bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4035 flow_flag_test(flow, INGRESS);
4036 bool act_is_encap = !!(attr->action &
4037 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4038 bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
4039 MLX5_DEVCOM_ESW_OFFLOADS);
4041 if (!esw_paired)
4042 return false;
4044 if ((mlx5_lag_is_sriov(attr->in_mdev) ||
4045 mlx5_lag_is_multipath(attr->in_mdev)) &&
4046 (is_rep_ingress || act_is_encap))
4047 return true;
4049 return false;
4050 }
4053 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4054 struct flow_cls_offload *f, unsigned long flow_flags,
4055 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4056 struct mlx5e_tc_flow **__flow)
4058 struct mlx5e_tc_flow_parse_attr *parse_attr;
4059 struct mlx5e_tc_flow *flow;
4060 int out_index, err;
4062 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
4063 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4064 if (!parse_attr || !flow) {
4065 err = -ENOMEM;
4066 goto err_free;
4067 }
4069 flow->cookie = f->cookie;
4070 flow->flags = flow_flags;
4071 flow->priv = priv;
4072 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4073 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4074 INIT_LIST_HEAD(&flow->mod_hdr);
4075 INIT_LIST_HEAD(&flow->hairpin);
4076 refcount_set(&flow->refcnt, 1);
4077 init_completion(&flow->init_done);
4079 *__flow = flow;
4080 *__parse_attr = parse_attr;
4082 return 0;
4084 err_free:
4085 kfree(flow);
4086 kvfree(parse_attr);
4087 return err;
4088 }
4091 mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
4092 struct mlx5e_priv *priv,
4093 struct mlx5e_tc_flow_parse_attr *parse_attr,
4094 struct flow_cls_offload *f,
4095 struct mlx5_eswitch_rep *in_rep,
4096 struct mlx5_core_dev *in_mdev)
4098 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4100 esw_attr->parse_attr = parse_attr;
4101 esw_attr->chain = f->common.chain_index;
4102 esw_attr->prio = f->common.prio;
4104 esw_attr->in_rep = in_rep;
4105 esw_attr->in_mdev = in_mdev;
4107 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4108 MLX5_COUNTER_SOURCE_ESWITCH)
4109 esw_attr->counter_dev = in_mdev;
4111 esw_attr->counter_dev = priv->mdev;
4114 static struct mlx5e_tc_flow *
4115 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4116 struct flow_cls_offload *f,
4117 unsigned long flow_flags,
4118 struct net_device *filter_dev,
4119 struct mlx5_eswitch_rep *in_rep,
4120 struct mlx5_core_dev *in_mdev)
4122 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4123 struct netlink_ext_ack *extack = f->common.extack;
4124 struct mlx5e_tc_flow_parse_attr *parse_attr;
4125 struct mlx5e_tc_flow *flow;
4126 int attr_size, err;
4128 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4129 attr_size = sizeof(struct mlx5_esw_flow_attr);
4130 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4131 &parse_attr, &flow);
4135 parse_attr->filter_dev = filter_dev;
4136 mlx5e_flow_esw_attr_init(flow->esw_attr,
4138 f, in_rep, in_mdev);
4140 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4141 f, filter_dev);
4142 if (err)
4143 goto err_free;
4145 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4146 if (err)
4147 goto err_free;
4149 err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
4150 if (err)
4151 goto err_free;
4153 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4154 complete_all(&flow->init_done);
4155 if (err) {
4156 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4157 goto err_free;
4159 add_unready_flow(flow);
4160 }
4162 return flow;
4164 err_free:
4165 mlx5e_flow_put(priv, flow);
4166 out:
4167 return ERR_PTR(err);
4168 }
4170 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4171 struct mlx5e_tc_flow *flow,
4172 unsigned long flow_flags)
4174 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4175 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4176 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4177 struct mlx5e_tc_flow_parse_attr *parse_attr;
4178 struct mlx5e_rep_priv *peer_urpriv;
4179 struct mlx5e_tc_flow *peer_flow;
4180 struct mlx5_core_dev *in_mdev;
4181 int err = 0;
4183 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4184 if (!peer_esw)
4185 return -ENODEV;
4187 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4188 peer_priv = netdev_priv(peer_urpriv->netdev);
4190 /* in_mdev is assigned the mdev from which the packet originated.
4191 * So packets redirected to the uplink use the same mdev as the
4192 * original flow, and packets redirected from the uplink use the
4193 * peer mdev.
4194 */
4195 if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
4196 in_mdev = peer_priv->mdev;
4198 in_mdev = priv->mdev;
4200 parse_attr = flow->esw_attr->parse_attr;
4201 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4202 parse_attr->filter_dev,
4203 flow->esw_attr->in_rep, in_mdev);
4204 if (IS_ERR(peer_flow)) {
4205 err = PTR_ERR(peer_flow);
4206 goto out;
4207 }
4209 flow->peer_flow = peer_flow;
4210 flow_flag_set(flow, DUP);
4211 mutex_lock(&esw->offloads.peer_mutex);
4212 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4213 mutex_unlock(&esw->offloads.peer_mutex);
4215 out:
4216 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4217 return err;
4218 }
4221 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4222 struct flow_cls_offload *f,
4223 unsigned long flow_flags,
4224 struct net_device *filter_dev,
4225 struct mlx5e_tc_flow **__flow)
4227 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4228 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4229 struct mlx5_core_dev *in_mdev = priv->mdev;
4230 struct mlx5e_tc_flow *flow;
4231 int err;
4233 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4234 in_mdev);
4235 if (IS_ERR(flow))
4236 return PTR_ERR(flow);
4238 if (is_peer_flow_needed(flow)) {
4239 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4240 if (err) {
4241 mlx5e_tc_del_fdb_flow(priv, flow);
4242 goto out;
4243 }
4244 }
4246 *__flow = flow;
4248 return 0;
4250 out:
4251 return err;
4252 }
4255 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4256 struct flow_cls_offload *f,
4257 unsigned long flow_flags,
4258 struct net_device *filter_dev,
4259 struct mlx5e_tc_flow **__flow)
4261 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4262 struct netlink_ext_ack *extack = f->common.extack;
4263 struct mlx5e_tc_flow_parse_attr *parse_attr;
4264 struct mlx5e_tc_flow *flow;
4265 int attr_size, err;
4267 /* multi-chain not supported for NIC rules */
4268 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4269 return -EOPNOTSUPP;
4271 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4272 attr_size = sizeof(struct mlx5_nic_flow_attr);
4273 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4274 &parse_attr, &flow);
4278 parse_attr->filter_dev = filter_dev;
4279 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4284 err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4288 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4292 flow_flag_set(flow, OFFLOADED);
4293 kvfree(parse_attr);
4294 *__flow = flow;
4296 return 0;
4298 err_free:
4299 mlx5e_flow_put(priv, flow);
4300 kvfree(parse_attr);
4301 out:
4302 return err;
4303 }
4306 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4307 struct flow_cls_offload *f,
4308 unsigned long flags,
4309 struct net_device *filter_dev,
4310 struct mlx5e_tc_flow **flow)
4312 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4313 unsigned long flow_flags;
4314 int err;
4316 get_flags(flags, &flow_flags);
4318 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4319 return -EOPNOTSUPP;
4321 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4322 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4323 filter_dev, flow);
4324 else
4325 err = mlx5e_add_nic_flow(priv, f, flow_flags,
4326 filter_dev, flow);
4328 return err;
4329 }
4331 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4332 struct flow_cls_offload *f, unsigned long flags)
4334 struct netlink_ext_ack *extack = f->common.extack;
4335 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4336 struct mlx5e_tc_flow *flow;
4337 int err = 0;
4339 rcu_read_lock();
4340 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4341 rcu_read_unlock();
4342 if (flow) {
4343 NL_SET_ERR_MSG_MOD(extack,
4344 "flow cookie already exists, ignoring");
4345 netdev_warn_once(priv->netdev,
4346 "flow cookie %lx already exists, ignoring\n",
4347 f->cookie);
4348 err = -EEXIST;
4349 goto out;
4350 }
4352 trace_mlx5e_configure_flower(f);
4353 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4354 if (err)
4355 goto out;
4357 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4358 if (err)
4359 goto err_free;
4361 return 0;
4363 err_free:
4364 mlx5e_flow_put(priv, flow);
4365 out:
4366 return err;
4367 }
4369 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4370 {
4371 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4372 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
4374 return flow_flag_test(flow, INGRESS) == dir_ingress &&
4375 flow_flag_test(flow, EGRESS) == dir_egress;
4376 }
4378 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4379 struct flow_cls_offload *f, unsigned long flags)
4381 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4382 struct mlx5e_tc_flow *flow;
4383 int err;
4385 rcu_read_lock();
4386 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4387 if (!flow || !same_flow_direction(flow, flags)) {
4388 err = -EINVAL;
4389 goto errout;
4390 }
4392 /* Only delete the flow if it doesn't have the MLX5E_TC_FLOW_DELETED
4393 * flag set.
4394 */
4395 if (flow_flag_test_and_set(flow, DELETED)) {
4396 err = -EINVAL;
4397 goto errout;
4398 }
4399 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4400 rcu_read_unlock();
4402 trace_mlx5e_delete_flower(f);
4403 mlx5e_flow_put(priv, flow);
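
/* Entry point for TC_CLSFLOWER_STATS. Stats come from the cached HW
 * counter; under multipath the peer eswitch's duplicate rule, when
 * offloaded, is folded into the totals before reporting.
 */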
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}
	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}
	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;
	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2, packets2, lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}
no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
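
/* Translate a matchall police rate into an eswitch vport rate limit.
 * rate == 0 is passed through as 0 Mbps, which presumably clears the
 * limit (see mlx5e_tc_delete_matchall below).
 */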
static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u16 vport_num;
	u32 rate_mbps;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec. Convert it to bits/sec and round to
	 * the nearest Mbit/sec (one million bits per second). If rate is
	 * non-zero, configure a minimum of 1 Mbit/sec.
	 */
	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
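
/* Validate that the matchall offload carries exactly one police action
 * with basic HW stats, then apply it and snapshot the vport counters as
 * the stats baseline.
 */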
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err, i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}
	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}
	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}
	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}
	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
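
/* Matchall stats are computed in software from the VF vport counters:
 * report the delta since the last query and reset the baseline.
 */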
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
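
/* A peer mlx5e device used by hairpin entries is going away: collect all
 * live entries under hairpin_tbl_lock, then, outside the lock, wait for
 * each entry's init to finish and mark pairs that point at the dead
 * peer's vhca id with peer_gone.
 */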
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}
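
/* Netdevice notifier: when another mlx5e netdev on the same HW is
 * unregistered, its hairpin peers must be marked dead.
 */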
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
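
/* NIC-mode TC init: set up the mod_hdr and hairpin tables, the flow
 * rhashtable, and a per-net notifier used to catch hairpin peer
 * netdevs being unregistered.
 */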
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mutex_init(&tc->t_lock);
	mutex_init(&tc->mod_hdr.lock);
	hash_init(tc->mod_hdr.hlist);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}
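
/* Uplink representor TC init: connection tracking support plus the
 * tunnel and tunnel-encap-options mapping tables used later to restore
 * tunnel metadata on chain miss.
 */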
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
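
/* Retry offloading flows that were added while their destination was not
 * ready (MLX5E_TC_FLOW_FLAG_NOT_READY); flows offloaded successfully are
 * removed from the unready list.
 */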
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
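
/* Restore tunnel metadata on a decapped packet that missed in HW.
 * tunnel_id packs a tunnel-mapping id in its upper bits and an
 * encap-options-mapping id in the low ENC_OPTS_BITS; both are looked up
 * in the tables populated at offload time, and a metadata dst is rebuilt
 * so software TC sees the packet as if it arrived on the tunnel device.
 */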
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				 struct mlx5e_tc_update_priv *tc_priv,
				 u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct flow_dissector_key_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;
	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		WARN_ON_ONCE(true);
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	tun_dst = tun_rx_dst(enc_opts.len);
	if (!tun_dst) {
		WARN_ON_ONCE(true);
		return false;
	}

	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   key.enc_ipv4.src, key.enc_ipv4.dst,
			   key.enc_ip.tos, key.enc_ip.ttl,
			   0, /* label */
			   key.enc_tp.src, key.enc_tp.dst,
			   key32_to_tunnel_id(key.enc_key_id.keyid),
			   TUNNEL_KEY);

	if (enc_opts.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
					enc_opts.len, enc_opts.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set tun_dev so we do dev_put() after datapath */
	tc_priv->tun_dev = dev;
	skb->dev = dev;
	return true;
}
#endif /* CONFIG_NET_TC_SKB_EXT */
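
/* Called from the representor RX path. reg_c0 carries the chain tag of a
 * HW chain miss and reg_c1 the CT tuple id plus the tunnel mapping id;
 * the chain is handed to software TC via a tc skb extension, then the CT
 * state and tunnel metadata are restored so processing resumes where
 * hardware stopped.
 */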
bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
			     struct sk_buff *skb,
			     struct mlx5e_tc_update_priv *tc_priv)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tc_skb_ext *tc_skb_ext;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	int tunnel_moffset;
	int err;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		reg_c0 = 0;
	reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);

	if (!reg_c0)
		return true;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;

	err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   reg_c0, err);
		return false;
	}

	if (chain) {
		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}
		tc_skb_ext->chain = chain;

		tuple_id = reg_c1 & TUPLE_ID_MAX;

		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
			return false;
	}

	tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
	tunnel_id = reg_c1 >> (8 * tunnel_moffset);
	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}

void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
{
	if (tc_priv->tun_dev)
		dev_put(tc_priv->tun_dev);
}