/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include "en/tc_tun.h"
#include "lib/devcom.h"
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table *hairpin_ft;
	struct mlx5_fc *counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= MLX5E_TC_ESW_OFFLOAD,
	MLX5E_TC_FLOW_NIC	= MLX5E_TC_NIC_OFFLOAD,
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_SLOW	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_DUP	= BIT(MLX5E_TC_FLOW_BASE + 4),
};
#define MLX5E_TC_MAX_SPLITS 1
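/* With one split supported, a flow carries up to two HW rules:
 * rule[0] is the main rule, and rule[1], when attr->split_count is
 * set, is the extra forwarding rule added after the split (see
 * mlx5e_tc_offload_fdb_rules() below).
 */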
/* Helper struct for accessing a struct containing list_head array.
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    container_of(helper item, containing struct type, helper field[index])
 */
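/* For example, given an encap_flow_item reached through an encap
 * entry's flow list, the owning flow is recovered in two steps:
 *
 *   efi  = container_of(item, struct encap_flow_item, list);
 *   flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * as done in mlx5e_tc_encap_flows_add() below.
 */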
struct encap_flow_item {
	struct list_head list;
	int index;
};
struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	u16 flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};
struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}
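/* Hashing the raw action array means two flows whose pedit actions
 * compile to byte-identical modify-header programs land in the same
 * bucket and can share one mod_hdr_id (see mlx5e_attach_mod_hdr()).
 */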
static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}
static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
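/* With the XOR hash function the table index is bit-inverted before
 * the lookup above; assuming mlx5e_bits_invert() reverses the low
 * ilog2(sz) bits, i = 1 with sz = 128 reads entry 64, spreading
 * consecutive indices across the indirection table.
 */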
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
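/* e.g. peer_vhca_id 0x5 and prio 3 hash to 0x50003: hairpin entries
 * are keyed per (peer device, PCP priority) pair.
 */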
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}
#define UNKNOWN_MATCH_PRIO 8
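/* PCP priorities occupy 3 bits (the full mask below is 0x7), so 8 is
 * out of range and safely denotes "no priority matched on".
 */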
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* allocate one hairpin channel per 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
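	/* e.g. a 100Gbps port yields 100000 / 50000 = 2 channels, while
	 * anything at or below 50Gbps is clamped to a single channel.
	 */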
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
		.flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			goto err_add_hairpin_flow;
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_create_mod_hdr_id;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			err = PTR_ERR(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack,
			      int out_index);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow->flags |= MLX5E_TC_FLOW_SLOW;

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
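/* A slow path rule only forwards to the slow-path FDB chain, so
 * packets keep flowing through software while e.g. an encap neighbour
 * is unresolved; mlx5e_tc_encap_flows_add() swaps it for the fast
 * path rule once the neighbour becomes valid.
 */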
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err = 0, encap_err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv,
					 &parse_attr->tun_info[out_index],
					 out_dev, &encap_dev, flow,
					 extack, out_index);
		if (err && err != -EAGAIN)
			goto err_attach_encap;
		if (err == -EAGAIN)
			encap_err = err;
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_add_vlan;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_mod_hdr;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_create_counter;
		}

		attr->counter = counter;
	}

	/* we get here if (1) there's no error or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (encap_err == -EAGAIN) {
		/* continue with the slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
	return err;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		if (flow->flags & MLX5E_TC_FLOW_SLOW)
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(efi, &e->flows, list) {
		bool all_flow_encaps_valid = true;
		int i;

		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[efi->index].encap_id = e->encap_id;
		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
		flow->rule[0] = rule;
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
		flow->rule[0] = rule;
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		struct encap_flow_item *efi;

		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(efi, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5e_tc_get_counter(flow);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct list_head *next = flow->encaps[out_index].list.next;

	list_del(&flow->encaps[out_index].list);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->flags & MLX5E_TC_FLOW_DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow->flags &= ~MLX5E_TC_FLOW_DUP;

	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
	kvfree(flow->peer_flow);
	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f,
			     struct net_device *filter_dev, u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);
	int err;

	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
				 headers_c, headers_v, match_level);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to parse tunnel attributes");
		return err;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      struct net_device *filter_dev,
			      u8 *match_level, u8 *tunnel_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}
	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		if (mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);

		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
			*match_level = MLX5_MATCH_L2;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (mask->tos || mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L4;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f,
			    struct net_device *filter_dev)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
	struct mlx5_eswitch_rep *rep;
	int err;

	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->esw_attr->match_level = match_level;
		flow->esw_attr->tunnel_match_level = tunnel_match_level;
	} else {
		flow->nic_attr->match_level = match_level;
	}

	return err;
}
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};
static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
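/* e.g. pedit_header(&masks[cmd], TCA_PEDIT_KEY_EX_HDR_TYPE_IP4)
 * points at the ip4 member, so a pedit key's 32-bit offset is applied
 * within the right pseudo packet header.
 */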
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
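/* e.g. OFFLOAD(SIPV4, 4, ip4.saddr, 0) expands to
 * {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr) + 0},
 * tying the HW rewrite field to the bytes pedit touched in the SW key.
 */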
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};
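/* Note the TCP_FLAGS entry: there is no named flags member to take
 * the offset of, so it is expressed as one byte at offset 5 past
 * tcp.ack_seq, i.e. byte 13 of the TCP header, where the flags live.
 */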
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, attr->num_mod_hdr_actions
 * says how many HW actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions +
		 parse_attr->num_mod_hdr_actions * action_size;

	max_actions = parse_attr->max_mod_hdr_actions;
	nactions = parse_attr->num_mod_hdr_actions;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}
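		/* e.g. a 32-bit mask of 0x00ffff00 (one contiguous run of
		 * set bits) passes the check above, while 0x00ff00ff would
		 * be rejected: one HW action writes a single offset/length
		 * span.
		 */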
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
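		/* The shift right-aligns the value to the field's first set
		 * bit, e.g. for a 16-bit field masked 0x0ff0, first is 4 and
		 * the data programmed is the host-order value >> 4.
		 */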
		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can expand into as many as 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->max_mod_hdr_actions = max_actions;
	return 0;
}
static const struct pedit_headers zero_masks = {};
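/* Parse a TC pedit action into mlx5 modify-header actions. Rough flow
 * (descriptive sketch added in editing; the code below is authoritative):
 * 1. accumulate masks/values of all pedit keys per command (SET/ADD),
 * 2. allocate the HW action array on first use,
 * 3. translate the accumulated fields via offload_pedit_fields(), and
 * 4. fail if any mask bits were left unconsumed, i.e. a key touched a
 *    header field the translation table doesn't cover.
 *
 * Illustrative command exercising this path (hypothetical netdev name):
 *   tc filter add dev enp4s0f0_0 ingress protocol ip flower \
 *       action pedit ex munge ip ttl set 63 pipe action csum iph
 */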
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "legacy pedit isn't offloaded");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	if (!parse_attr->mod_hdr_actions) {
		err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
		if (err)
			goto out_err;
	}

	err = offload_pedit_fields(masks, vals, parse_attr, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
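/* A TC csum action is accepted only together with a pedit action: the
 * HW recalculates checksums as a side effect of header rewrite, so a
 * standalone csum action has nothing to map to. Only the IPv4-header,
 * TCP and UDP update flags are considered offloadable.
 */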
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalculates checksums only when re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts,
					  u32 actions,
					  struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	bool modify_ip_header;
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_for_each_action(i, a, exts) {
		int k;

		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (k = 0; k < nkeys; k++) {
			htype = tcf_pedit_htype(a, k);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts,
						     actions, extack);

	return true;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}
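/* Parse TC actions for NIC (non-eswitch) offload: drop, pedit, csum,
 * skbedit mark and hairpin, where a mirred redirect between two
 * functions of the same HW is forwarded entirely inside the NIC.
 *
 * Illustrative hairpin rule (hypothetical interface names):
 *   tc filter add dev enp4s0f0 ingress protocol ip flower \
 *       action mirred egress redirect dev enp4s0f1
 */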
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
		same_hw_devs(priv, peer_priv) &&
		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}
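/* Attach a flow to an encap entry, creating the entry on first use.
 * Sketch of the scheme (editor's summary): entries are de-duplicated in
 * a hash table keyed by the tunnel key, so flows sharing a tunnel
 * destination share one HW encap context. Building the encap header may
 * wait on neighbour resolution, in which case the header-create helpers
 * return -EAGAIN and the flow stays attached to a not-yet-valid entry
 * until the encap completes.
 */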
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack,
			      int out_index)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	uintptr_t hash_key;
	bool found = false;
	int err = 0;

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify whether the encap entry is valid */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err;

	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].encap_id = e->encap_id;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
	} else {
		err = -EAGAIN;
	}

	return err;

out_err:
	kfree(e);
	return err;
}
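/* Translate a TC vlan action into eswitch VLAN push/pop attributes. Up
 * to MLX5_FS_VLAN_DEPTH (two) tags are handled when the device
 * advertises the capability; TCA_VLAN_ACT_MODIFY is not offloaded.
 *
 * Illustrative QinQ push (hypothetical rep names):
 *   tc filter add dev enp4s0f0_0 ingress flower \
 *       action vlan push protocol 802.1ad id 100 pipe \
 *       action vlan push id 200 pipe \
 *       action mirred egress redirect dev enp4s0f0
 */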
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			     tcf_vlan_push_prio(a)))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}
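/* Parse TC actions for eswitch (FDB) offload. Besides drop/pedit/csum,
 * this covers forwarding: redirect/mirror to reps on the same or a
 * merged eswitch, VLAN push/pop, tunnel encap/decap and goto chain. A
 * tunnel_key set action only records the tunnel info here; the encap
 * itself is resolved later, per output port.
 */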
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = tcf_mirred_dev(a);

			/* out_dev is NULL when filters with
			 * non-existing mirred device are replayed to
			 * the driver.
			 */
			if (!out_dev)
				return -EINVAL;

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);

				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;

				if (!mlx5e_eswitch_rep(out_dev))
					return -EOPNOTSUPP;

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = *info;
				encap = false;
				attr->parse_attr = parse_attr;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on an invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			continue;
		}

		if (is_tcf_vlan(a)) {
			err = parse_tc_vlan_action(priv, a, attr, &action);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		if (is_tcf_gact_goto_chain(a)) {
			u32 dest_chain = tcf_gact_goto_chain_index(a);
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;

			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void get_flags(int flags, u16 *flow_flags)
{
	u16 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	if (flags & MLX5E_TC_ESW_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	if (flags & MLX5E_TC_NIC_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_NIC;

	*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5E_TC_ESW_OFFLOAD) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}
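/* Decide whether a rule must be duplicated on the peer eswitch of a
 * paired (VF LAG) device. The rule encoded below, roughly: when the two
 * eswitches are paired through devcom and SR-IOV LAG is active, VF
 * ingress rules and encap rules need a mirror on the peer, since
 * traffic may arrive or leave through either physical port.
 */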
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
			      flow->flags & MLX5E_TC_FLOW_INGRESS;
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
	       (is_rep_ingress || act_is_encap);
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct tc_cls_flower_offload *f, u16 flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static int
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct tc_cls_flower_offload *f,
		     u16 flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev,
		     struct mlx5e_tc_flow **__flow)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	flow->esw_attr->parse_attr = parse_attr;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	flow->esw_attr->chain = f->common.chain_index;
	flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
	err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->esw_attr->in_rep = in_rep;
	flow->esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		flow->esw_attr->counter_dev = in_mdev;
	else
		flow->esw_attr->counter_dev = priv->mdev;

	err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	*__flow = flow;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return err;
}
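/* Duplicate an FDB rule onto the peer eswitch of a paired device. The
 * peer rule is installed via the peer uplink rep's private data, linked
 * to the original flow (MLX5E_TC_FLOW_DUP) so deletion and stats can
 * find it, and tracked on the eswitch's peer_flows list.
 */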
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
				      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the mdev the packet originated from: packets
	 * redirected to the uplink use the original flow's mdev, while
	 * packets redirected from the uplink use the peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
				   parse_attr->filter_dev,
				   flow->esw_attr->in_rep, in_mdev, &peer_flow);
	if (err)
		goto out;

	flow->peer_flow = peer_flow;
	flow->flags |= MLX5E_TC_FLOW_DUP;
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				   in_mdev, &flow);
	if (err)
		goto out;

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= MLX5E_TC_FLOW_NIC;
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct tc_cls_flower_offload *f,
		  int flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u16 flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		goto out;
	}

	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
out:
	return err;
}
#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
		return true;

	return false;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}
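/* Report HW stats for a flower rule back to TC. Counters are read from
 * the cached flow counter; if the rule was duplicated onto a paired
 * eswitch, the peer counter is folded in: bytes and packets are summed,
 * lastuse takes the most recent of the two.
 */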
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5e_tc_get_counter(flow);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
	    (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

out:
	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe;
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
		if (hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
	}
}
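/* Netdevice notifier used to catch a hairpin peer going away: when
 * another mlx5e netdev on the same HW is unregistered, mark every
 * hairpin pair pointing at its vhca_id as dead so teardown doesn't
 * touch the peer's resources.
 */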
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}