Merge tag 'mlx5-updates-2022-03-17' of git://git.kernel.org/pub/scm/linux/kernel...
author David S. Miller <davem@davemloft.net>
Fri, 18 Mar 2022 10:30:00 +0000 (10:30 +0000)
committer David S. Miller <davem@davemloft.net>
Fri, 18 Mar 2022 10:30:00 +0000 (10:30 +0000)
Saeed Mahameed says:

====================
mlx5-updates-2022-03-17

1) From Maxim Mikityanskiy,
   Datapath improvements in preparation for XDP multi buffer

   This series contains general improvements for the datapath that are
   useful for the upcoming XDP multi buffer support:

   a. Non-linear legacy RQ: validate the MTU for robustness, build the linear
      part of the SKB over the first hardware fragment (instead of copying the
      packet headers), and adjust headroom calculations to allow enabling
      headroom in the non-linear mode (useful for XDP multi buffer).

   b. XDP: test for an attached XDP program before the function call, and
      optimize the parameters of mlx5e_xdp_handle (a condensed sketch of the
      new calling pattern follows below).
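
      A condensed sketch of the new calling pattern (identifiers taken from
      the en_rx.c hunk further below; error handling and statistics omitted):

          struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);

          if (prog) {
                  struct xdp_buff xdp;

                  net_prefetchw(va); /* xdp_frame data area */
                  mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
                  if (mlx5e_xdp_handle(rq, di, prog, &xdp))
                          return NULL; /* page/packet was consumed by XDP */

                  /* XDP_PASS: pick up any adjustments made by the program */
                  rx_headroom = xdp.data - xdp.data_hard_start;
                  cqe_bcnt = xdp.data_end - xdp.data;
          }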

2) From Rongwei Liu, DR, reduce steering memory usage
   Currently, the mlx5 driver uses mlx5_htbl/chunk/ste structures to organize
   the steering logic; however, there is some memory waste.

   This update reduces the steering memory footprint by:
   a. Adjusting the struct member layout.
   b. Removing duplicated indicators by using simple function calls (one such
      helper is sketched below).

   With 500k TX rules (3 STEs) plus 500k RX rules (6 STEs), these patches
   can save around 17% of memory.
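
   For illustration, one of the new helpers (copied from the dr_icm_pool.c
   hunk further below) recomputes the chunk's ICM address from its buddy
   memory and segment instead of caching it in every chunk:

       u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
       {
               u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

               return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
       }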

3) Three cleanup commits at the end of this series.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
17 files changed:
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
include/linux/mlx5/driver.h

index d5408f6..e52b0ba 100644 (file)
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
        return cpu_handle;
 }
 
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-                              struct mlx5_frag_buf *buf, int node)
-{
-       dma_addr_t t;
-
-       buf->size = size;
-       buf->npages       = 1;
-       buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-
-       buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
-       if (!buf->frags)
-               return -ENOMEM;
-
-       buf->frags->buf   = mlx5_dma_zalloc_coherent_node(dev, size,
-                                                         &t, node);
-       if (!buf->frags->buf)
-               goto err_out;
-
-       buf->frags->map = t;
-
-       while (t & ((1 << buf->page_shift) - 1)) {
-               --buf->page_shift;
-               buf->npages *= 2;
-       }
-
-       return 0;
-err_out:
-       kfree(buf->frags);
-       return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
-                  int size, struct mlx5_frag_buf *buf)
-{
-       return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
-       dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
-                         buf->frags->map);
-
-       kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node)
 {
@@ -286,19 +239,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 }
 EXPORT_SYMBOL_GPL(mlx5_db_free);
 
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
-       u64 addr;
-       int i;
-
-       for (i = 0; i < buf->npages; i++) {
-               addr = buf->frags->map + (i << buf->page_shift);
-
-               pas[i] = cpu_to_be64(addr);
-       }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
 {
        int i;
index 0bd8698..5c4711b 100644 (file)
@@ -188,12 +188,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk)
 {
-       bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
-               mlx5e_rx_is_linear_skb(params, xsk) :
-               mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+       u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
 
-       return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
-               mlx5e_get_linear_rq_headroom(params, xsk) : 0;
+       if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+               return linear_headroom;
+
+       if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+               return linear_headroom;
+
+       if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+               return linear_headroom;
+
+       return 0;
 }
 
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -392,16 +398,25 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
        };
 }
 
+static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size)
+{
+       /* Optimization for small packets: the last fragment is bigger than the others. */
+       return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
+}
+
 #define DEFAULT_FRAG_SIZE (2048)
 
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
-                                     struct mlx5e_params *params,
-                                     struct mlx5e_xsk_param *xsk,
-                                     struct mlx5e_rq_frags_info *info)
+static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+                                    struct mlx5e_params *params,
+                                    struct mlx5e_xsk_param *xsk,
+                                    struct mlx5e_rq_frags_info *info)
 {
        u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        int frag_size_max = DEFAULT_FRAG_SIZE;
+       int first_frag_size_max;
        u32 buf_size = 0;
+       u16 headroom;
+       int max_mtu;
        int i;
 
        if (mlx5_fpga_is_ipsec_device(mdev))
@@ -420,21 +435,42 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
                goto out;
        }
 
-       if (byte_count > PAGE_SIZE +
-           (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+       headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+       first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+       max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
+       if (byte_count > max_mtu) {
                frag_size_max = PAGE_SIZE;
+               first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+               max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
+               if (byte_count > max_mtu) {
+                       mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
+                                     params->sw_mtu, max_mtu);
+                       return -EINVAL;
+               }
+       }
 
        i = 0;
        while (buf_size < byte_count) {
                int frag_size = byte_count - buf_size;
 
-               if (i < MLX5E_MAX_RX_FRAGS - 1)
+               if (i == 0)
+                       frag_size = min(frag_size, first_frag_size_max);
+               else if (i < MLX5E_MAX_RX_FRAGS - 1)
                        frag_size = min(frag_size, frag_size_max);
 
                info->arr[i].frag_size = frag_size;
+               buf_size += frag_size;
+
+               if (i == 0) {
+                       /* Ensure that headroom and tailroom are included. */
+                       frag_size += headroom;
+                       frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+               }
+
                info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
 
-               buf_size += frag_size;
                i++;
        }
        info->num_frags = i;
@@ -444,6 +480,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 out:
        info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
        info->log_num_frags = order_base_2(info->num_frags);
+
+       return 0;
 }
 
 static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
@@ -540,6 +578,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int ndsegs = 1;
+       int err;
 
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
@@ -579,7 +618,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        }
        default: /* MLX5_WQ_TYPE_CYCLIC */
                MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
-               mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+               err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+               if (err)
+                       return err;
                ndsegs = param->frags_info.num_frags;
        }
 
index 7db9d8e..e49f511 100644 (file)
@@ -1161,7 +1161,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
        }
 
        rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
-       mlx5_tc_ct_entry_remove_from_tuples(entry);
        spin_unlock_bh(&ct_priv->ht_lock);
 
        mlx5_tc_ct_entry_put(entry);
index a7f0203..6aa77f0 100644 (file)
@@ -120,19 +120,14 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-                     u32 *len, struct xdp_buff *xdp)
+                     struct bpf_prog *prog, struct xdp_buff *xdp)
 {
-       struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
        u32 act;
        int err;
 
-       if (!prog)
-               return false;
-
        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
-               *len = xdp->data_end - xdp->data;
                return false;
        case XDP_TX:
                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
index c62f11d..20d8af6 100644 (file)
@@ -48,7 +48,7 @@
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-                     u32 *len, struct xdp_buff *xdp);
+                     struct bpf_prog *prog, struct xdp_buff *xdp);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
index 8e7b877..021da08 100644 (file)
@@ -4,6 +4,7 @@
 #include "rx.h"
 #include "en/xdp.h"
 #include <net/xdp_sock_drv.h>
+#include <linux/filter.h>
 
 /* RX data path */
 
@@ -30,7 +31,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    u32 page_idx)
 {
        struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
-       u32 cqe_bcnt32 = cqe_bcnt;
+       struct bpf_prog *prog;
 
        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -45,7 +46,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         */
        WARN_ON_ONCE(head_offset);
 
-       xdp->data_end = xdp->data + cqe_bcnt32;
+       xdp->data_end = xdp->data + cqe_bcnt;
        xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
@@ -65,7 +66,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
         * allocated first from the Reuse Ring, so it has enough space.
         */
 
-       if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+       prog = rcu_dereference(rq->xdp_prog);
+       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
                if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
@@ -74,7 +76,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
        /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
         * frame. On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -83,6 +85,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              u32 cqe_bcnt)
 {
        struct xdp_buff *xdp = wi->di->xsk;
+       struct bpf_prog *prog;
 
        /* wi->offset is not used in this function, because xdp->data and the
         * DMA address point directly to the necessary place. Furthermore, the
@@ -101,12 +104,13 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                return NULL;
        }
 
-       if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+       prog = rcu_dereference(rq->xdp_prog);
+       if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
                return NULL; /* page/packet was consumed by XDP */
 
        /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
         * will be handled by mlx5e_put_rx_frag.
         * On SKB allocation failure, NULL is returned.
         */
-       return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+       return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }
index 074a44b..4b8699f 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/bitmap.h>
+#include <linux/filter.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
 #include <net/inet_ecn.h>
@@ -373,12 +374,15 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
        int i;
 
        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+               u16 headroom;
+
                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;
 
+               headroom = i == 0 ? rq->buff.headroom : 0;
                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
-                                               frag->offset + rq->buff.headroom);
+                                               frag->offset + headroom);
        }
 
        return 0;
@@ -1520,11 +1524,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 {
        struct mlx5e_dma_info *di = wi->di;
        u16 rx_headroom = rq->buff.headroom;
-       struct xdp_buff xdp;
+       struct bpf_prog *prog;
        struct sk_buff *skb;
+       u32 metasize = 0;
        void *va, *data;
        u32 frag_size;
-       u32 metasize;
 
        va             = page_address(di->page) + wi->offset;
        data           = va + rx_headroom;
@@ -1532,16 +1536,22 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
                                      frag_size, DMA_FROM_DEVICE);
-       net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(data);
 
-       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-       if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
-               return NULL; /* page/packet was consumed by XDP */
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog) {
+               struct xdp_buff xdp;
+
+               net_prefetchw(va); /* xdp_frame data area */
+               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+               if (mlx5e_xdp_handle(rq, di, prog, &xdp))
+                       return NULL; /* page/packet was consumed by XDP */
 
-       rx_headroom = xdp.data - xdp.data_hard_start;
+               rx_headroom = xdp.data - xdp.data_hard_start;
+               metasize = xdp.data - xdp.data_meta;
+               cqe_bcnt = xdp.data_end - xdp.data;
+       }
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
-       metasize = xdp.data - xdp.data_meta;
        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
        if (unlikely(!skb))
                return NULL;
@@ -1557,43 +1567,45 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
-       struct mlx5e_wqe_frag_info *head_wi = wi;
-       u16 headlen      = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
-       u16 frag_headlen = headlen;
-       u16 byte_cnt     = cqe_bcnt - headlen;
+       u16 rx_headroom = rq->buff.headroom;
+       struct mlx5e_dma_info *di = wi->di;
+       u32 frag_consumed_bytes;
+       u32 first_frag_size;
        struct sk_buff *skb;
+       void *va;
+
+       va = page_address(di->page) + wi->offset;
+       frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
+       first_frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + frag_consumed_bytes);
+
+       dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+                                     first_frag_size, DMA_FROM_DEVICE);
+       net_prefetch(va + rx_headroom);
 
        /* XDP is not supported in this configuration, as incoming packets
         * might spread among multiple pages.
         */
-       skb = napi_alloc_skb(rq->cq.napi,
-                            ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
-       if (unlikely(!skb)) {
-               rq->stats->buff_alloc_err++;
+       skb = mlx5e_build_linear_skb(rq, va, first_frag_size, rx_headroom,
+                                    frag_consumed_bytes, 0);
+       if (unlikely(!skb))
                return NULL;
-       }
 
-       net_prefetchw(skb->data);
+       page_ref_inc(di->page);
+
+       cqe_bcnt -= frag_consumed_bytes;
+       frag_info++;
+       wi++;
 
-       while (byte_cnt) {
-               u16 frag_consumed_bytes =
-                       min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+       while (cqe_bcnt) {
+               frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-               mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
+               mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset,
                                   frag_consumed_bytes, frag_info->frag_stride);
-               byte_cnt -= frag_consumed_bytes;
-               frag_headlen = 0;
+               cqe_bcnt -= frag_consumed_bytes;
                frag_info++;
                wi++;
        }
 
-       /* copy header */
-       mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
-                             headlen);
-       /* skb linear part was allocated with headlen and aligned to long */
-       skb->tail += headlen;
-       skb->len  += headlen;
-
        return skb;
 }
 
@@ -1836,12 +1848,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 {
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
-       u32 cqe_bcnt32 = cqe_bcnt;
-       struct xdp_buff xdp;
+       struct bpf_prog *prog;
        struct sk_buff *skb;
+       u32 metasize = 0;
        void *va, *data;
        u32 frag_size;
-       u32 metasize;
 
        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1851,24 +1862,30 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
-       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+       frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
                                      frag_size, DMA_FROM_DEVICE);
-       net_prefetchw(va); /* xdp_frame data area */
        net_prefetch(data);
 
-       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
-       if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
-               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-                       __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
-               return NULL; /* page/packet was consumed by XDP */
-       }
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog) {
+               struct xdp_buff xdp;
+
+               net_prefetchw(va); /* xdp_frame data area */
+               mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+               if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+                       if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                               __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+                       return NULL; /* page/packet was consumed by XDP */
+               }
 
-       rx_headroom = xdp.data - xdp.data_hard_start;
-       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
-       metasize = xdp.data - xdp.data_meta;
-       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
+               rx_headroom = xdp.data - xdp.data_hard_start;
+               metasize = xdp.data - xdp.data_meta;
+               cqe_bcnt = xdp.data_end - xdp.data;
+       }
+       frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
        if (unlikely(!skb))
                return NULL;
 
index 743422a..850937c 100644 (file)
@@ -570,6 +570,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 
        for (i = 0; i < num_actions; i++) {
                struct mlx5dr_action_dest_tbl *dest_tbl;
+               struct mlx5dr_icm_chunk *chunk;
                struct mlx5dr_action *action;
                int max_actions_type = 1;
                u32 action_type;
@@ -598,9 +599,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                                   matcher->tbl->level,
                                                   dest_tbl->tbl->level);
                                }
-                               attr.final_icm_addr = rx_rule ?
-                                       dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
-                                       dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+                               chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+                                       dest_tbl->tbl->tx.s_anchor->chunk;
+                               attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
                        } else {
                                struct mlx5dr_cmd_query_flow_table_details output;
                                int ret;
@@ -1123,7 +1124,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
                }
 
                action->rewrite->data = (void *)hw_actions;
-               action->rewrite->index = (action->rewrite->chunk->icm_addr -
+               action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
+                                         (action->rewrite->chunk) -
                                         dmn->info.caps.hdr_modify_icm_addr) /
                                         ACTION_CACHE_LINE_SIZE;
 
@@ -1702,7 +1704,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
        action->rewrite->modify_ttl = modify_ttl;
        action->rewrite->data = (u8 *)hw_actions;
        action->rewrite->num_of_actions = num_hw_actions;
-       action->rewrite->index = (chunk->icm_addr -
+       action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
                                  dmn->info.caps.hdr_modify_icm_addr) /
                                  ACTION_CACHE_LINE_SIZE;
 
index d232f1e..d5998ef 100644 (file)
@@ -217,7 +217,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
                                       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
        }
 
-       dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+       dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+                         DR_STE_SIZE_REDUCED);
 
        seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
                   dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
@@ -346,16 +347,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
                      const u64 matcher_id)
 {
        enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr, e_icm_addr;
        int i, ret;
 
        rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
                           DR_DUMP_REC_TYPE_MATCHER_TX;
 
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+       e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
        seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
                   rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
                   matcher_id, matcher_rx_tx->num_of_builders,
-                  dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
-                  dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+                  dr_dump_icm_to_idx(s_icm_addr),
+                  dr_dump_icm_to_idx(e_icm_addr));
 
        for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
                ret = dr_dump_matcher_builder(file,
@@ -426,12 +430,14 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
                    const u64 table_id)
 {
        enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr;
 
        rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
                           DR_DUMP_REC_TYPE_TABLE_TX;
 
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
        seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
-                  dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+                  dr_dump_icm_to_idx(s_icm_addr));
 
        return 0;
 }
index e289cfd..4ca67fa 100644 (file)
@@ -57,6 +57,36 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
        return mlx5_core_create_mkey(mdev, mkey, in, inlen);
 }
 
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)offset * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+       return chunk->buddy_mem->icm_mr->mkey;
+}
+
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+                       chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
 static struct mlx5dr_icm_mr *
 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
 {
@@ -158,12 +188,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
 
 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
 {
+       int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
        struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
 
        memset(chunk->hw_ste_arr, 0,
-              chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+              num_of_entries * dr_icm_buddy_get_ste_size(buddy));
        memset(chunk->ste_arr, 0,
-              chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
+              num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
 static enum mlx5dr_icm_type
@@ -177,7 +208,7 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
 {
        enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
 
-       buddy->used_memory -= chunk->byte_size;
+       buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
        list_del(&chunk->chunk_list);
 
        if (icm_type == DR_ICM_TYPE_STE)
@@ -298,21 +329,14 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
 
        offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
 
-       chunk->rkey = buddy_mem_pool->icm_mr->mkey;
-       chunk->mr_addr = offset;
-       chunk->icm_addr =
-               (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
-       chunk->num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
-       chunk->byte_size =
-               mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
        chunk->seg = seg;
+       chunk->size = chunk_size;
        chunk->buddy_mem = buddy_mem_pool;
 
        if (pool->icm_type == DR_ICM_TYPE_STE)
                dr_icm_chunk_ste_init(chunk, offset);
 
-       buddy_mem_pool->used_memory += chunk->byte_size;
+       buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
        INIT_LIST_HEAD(&chunk->chunk_list);
 
        /* chunk now is part of the used_list */
@@ -336,6 +360,7 @@ static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
 {
        struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+       u32 num_entries;
        int err;
 
        err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -348,9 +373,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
                struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
 
                list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
-                       mlx5dr_buddy_free_mem(buddy, chunk->seg,
-                                             ilog2(chunk->num_of_entries));
-                       pool->hot_memory_size -= chunk->byte_size;
+                       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+                       mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
+                       pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
                        dr_icm_chunk_destroy(chunk, buddy);
                }
 
@@ -448,7 +473,7 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
        /* move the memory to the waiting list AKA "hot" */
        mutex_lock(&pool->mutex);
        list_move_tail(&chunk->chunk_list, &buddy->hot_list);
-       pool->hot_memory_size += chunk->byte_size;
+       pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
 
        /* Check if we have chunks that are waiting for sync-ste */
        if (dr_icm_pool_is_sync_required(pool))
index a4b5b41..0726848 100644 (file)
@@ -705,7 +705,7 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
 
        /* Connect start hash table to end anchor */
        info.type = CONNECT_MISS;
-       info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
        ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
                                                curr_nic_matcher->s_htbl,
                                                &info, false);
@@ -726,12 +726,14 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
                return ret;
 
        /* Update the pointing ste and next hash table */
-       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
-       prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+       prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
 
        if (next_nic_matcher) {
-               next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
-               curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+               next_nic_matcher->s_htbl->pointing_ste =
+                       curr_nic_matcher->e_anchor->chunk->ste_arr;
+               curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+                       next_nic_matcher->s_htbl;
        }
 
        return 0;
@@ -1043,12 +1045,12 @@ static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
        if (next_nic_matcher) {
                info.type = CONNECT_HIT;
                info.hit_next_htbl = next_nic_matcher->s_htbl;
-               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
-               prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+               prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
        } else {
                info.type = CONNECT_MISS;
                info.miss_icm_addr = nic_tbl->default_icm_addr;
-               prev_anchor->ste_arr[0].next_htbl = NULL;
+               prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
        }
 
        return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
index b437457..ddfaf78 100644 (file)
@@ -21,12 +21,12 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
        if (!ste_info_last)
                return -ENOMEM;
 
-       mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
+       mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
                                 mlx5dr_ste_get_icm_addr(new_last_ste));
        list_add_tail(&new_last_ste->miss_list_node, miss_list);
 
        mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
-                                                 0, last_ste->hw_ste,
+                                                 0, mlx5dr_ste_get_hw_ste(last_ste),
                                                  ste_info_last, send_list, true);
 
        return 0;
@@ -41,6 +41,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
        struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
        struct mlx5dr_ste_htbl *new_htbl;
        struct mlx5dr_ste *ste;
+       u64 icm_addr;
 
        /* Create new table for miss entry */
        new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -53,9 +54,9 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
        }
 
        /* One and only entry, never grows */
-       ste = new_htbl->ste_arr;
-       mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       ste = new_htbl->chunk->ste_arr;
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
        mlx5dr_htbl_get(new_htbl);
 
        return ste;
@@ -79,7 +80,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
        ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
 
        /* In collision entry, all members share the same miss_list_head */
-       ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+       ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
 
        /* Next table */
        if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
@@ -107,9 +108,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
         * is already written to the hw.
         */
        if (ste_info->size == DR_STE_SIZE_CTRL)
-               memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_CTRL);
        else
-               memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_REDUCED);
 
        ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
                                       ste_info->size, ste_info->offset);
@@ -159,7 +162,7 @@ dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
 
        /* Check if hw_ste is present in the list */
        list_for_each_entry(ste, miss_list, miss_list_node) {
-               if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+               if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
                        return ste;
        }
 
@@ -185,7 +188,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
        new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
 
        /* In collision entry, all members share the same miss_list_head */
-       new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+       new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
 
        /* Update the previous from the list */
        ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
@@ -235,6 +238,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
        bool use_update_list = false;
        u8 hw_ste[DR_STE_SIZE] = {};
        struct mlx5dr_ste *new_ste;
+       u64 icm_addr;
        int new_idx;
        u8 sb_idx;
 
@@ -243,12 +247,12 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
        mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
 
        /* Copy STE control and tag */
-       memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
-       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
+       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
 
        new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
-       new_ste = &new_htbl->ste_arr[new_idx];
+       new_ste = &new_htbl->chunk->ste_arr[new_idx];
 
        if (mlx5dr_ste_is_not_used(new_ste)) {
                mlx5dr_htbl_get(new_htbl);
@@ -269,7 +273,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
                use_update_list = true;
        }
 
-       memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
 
        new_htbl->ctrl.num_of_valid_entries++;
 
@@ -334,7 +338,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
        int err = 0;
        int i;
 
-       cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+       cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
 
        if (cur_entries < 1) {
                mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
@@ -342,7 +346,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
        }
 
        for (i = 0; i < cur_entries; i++) {
-               cur_ste = &cur_htbl->ste_arr[i];
+               cur_ste = &cur_htbl->chunk->ste_arr[i];
                if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
                        continue;
 
@@ -398,7 +402,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
 
        /* Write new table to HW */
        info.type = CONNECT_MISS;
-       info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
        mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
                                     dmn->info.caps.gvmi,
                                     nic_dmn->type,
@@ -446,21 +450,21 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
                 * (48B len) which works only on first 32B
                 */
                mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
-                                       prev_htbl->ste_arr[0].hw_ste,
-                                       new_htbl->chunk->icm_addr,
-                                       new_htbl->chunk->num_of_entries);
+                                       prev_htbl->chunk->hw_ste_arr,
+                                       mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
+                                       mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
 
-               ste_to_update = &prev_htbl->ste_arr[0];
+               ste_to_update = &prev_htbl->chunk->ste_arr[0];
        } else {
                mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
-                                                    cur_htbl->pointing_ste->hw_ste,
+                                                    mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
                                                     new_htbl);
                ste_to_update = cur_htbl->pointing_ste;
        }
 
        mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
-                                                 0, ste_to_update->hw_ste, ste_info,
-                                                 update_list, false);
+                                                 0, mlx5dr_ste_get_hw_ste(ste_to_update),
+                                                 ste_info, update_list, false);
 
        return new_htbl;
 
@@ -489,10 +493,10 @@ static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
        struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
        enum mlx5dr_icm_chunk_size new_size;
 
-       new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+       new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
        new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
 
-       if (new_size == cur_htbl->chunk_size)
+       if (new_size == cur_htbl->chunk->size)
                return NULL; /* Skip rehash, we already at the max size */
 
        return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
@@ -659,13 +663,13 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
        struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
        int threshold;
 
-       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
                return false;
 
        if (!mlx5dr_ste_htbl_may_grow(htbl))
                return false;
 
-       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
                return false;
 
        threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
@@ -755,6 +759,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 {
        struct mlx5dr_domain *dmn = matcher->tbl->dmn;
        struct mlx5dr_ste_send_info *ste_info;
+       u64 icm_addr;
 
        /* Take ref on table, only on first time this ste is used */
        mlx5dr_htbl_get(cur_htbl);
@@ -762,8 +767,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
        /* new entry -> new branch */
        list_add_tail(&ste->miss_list_node, miss_list);
 
-       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
-                                nic_matcher->e_anchor->chunk->icm_addr);
+       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+       mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
 
        ste->ste_chain_location = ste_location;
 
@@ -822,7 +827,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
 again:
        index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
        miss_list = &cur_htbl->chunk->miss_list[index];
-       ste = &cur_htbl->ste_arr[index];
+       ste = &cur_htbl->chunk->ste_arr[index];
 
        if (mlx5dr_ste_is_not_used(ste)) {
                if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
@@ -858,7 +863,7 @@ again:
                                                  ste_location, send_ste_list);
                        if (!new_htbl) {
                                mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
-                                          cur_htbl->chunk_size);
+                                          cur_htbl->chunk->size);
                                mlx5dr_htbl_put(cur_htbl);
                        } else {
                                cur_htbl = new_htbl;
index 00aef47..ef19a66 100644 (file)
@@ -407,17 +407,17 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
                                   int *iterations,
                                   int *num_stes)
 {
+       u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int alloc_size;
 
-       if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
-               *iterations = htbl->chunk->byte_size /
-                       dmn->send_ring->max_post_send_size;
+       if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+               *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
                *byte_size = dmn->send_ring->max_post_send_size;
                alloc_size = *byte_size;
                *num_stes = *byte_size / DR_STE_SIZE;
        } else {
                *iterations = 1;
-               *num_stes = htbl->chunk->num_of_entries;
+               *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
                alloc_size = *num_stes * DR_STE_SIZE;
        }
 
@@ -453,7 +453,7 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
        send_info.write.length = size;
        send_info.write.lkey = 0;
        send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
-       send_info.rkey = ste->htbl->chunk->rkey;
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
 
        return dr_postsend_icm_data(dmn, &send_info);
 }
@@ -462,7 +462,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                              struct mlx5dr_ste_htbl *htbl,
                              u8 *formatted_ste, u8 *mask)
 {
-       u32 byte_size = htbl->chunk->byte_size;
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int num_stes_per_iter;
        int iterations;
        u8 *data;
@@ -486,7 +486,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                 * need to add the bit_mask
                 */
                for (j = 0; j < num_stes_per_iter; j++) {
-                       struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
+                       struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
                        u32 ste_off = j * DR_STE_SIZE;
 
                        if (mlx5dr_ste_is_not_used(ste)) {
@@ -495,7 +495,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                        } else {
                                /* Copy data */
                                memcpy(data + ste_off,
-                                      htbl->ste_arr[ste_index + j].hw_ste,
+                                      htbl->chunk->hw_ste_arr +
+                                      DR_STE_SIZE_REDUCED * (ste_index + j),
                                       DR_STE_SIZE_REDUCED);
                                /* Copy bit_mask */
                                memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
@@ -511,8 +512,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
                send_info.write.length = byte_size;
                send_info.write.lkey = 0;
                send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
-               send_info.rkey = htbl->chunk->rkey;
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
 
                ret = dr_postsend_icm_data(dmn, &send_info);
                if (ret)
@@ -530,7 +531,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                                        u8 *ste_init_data,
                                        bool update_hw_ste)
 {
-       u32 byte_size = htbl->chunk->byte_size;
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
        int iterations;
        int num_stes;
        u8 *copy_dst;
@@ -546,7 +547,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
        if (update_hw_ste) {
                /* Copy the reduced STE to hash table ste_arr */
                for (i = 0; i < num_stes; i++) {
-                       copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+                       copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                        memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
                }
        }
@@ -568,8 +569,8 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                send_info.write.length = byte_size;
                send_info.write.lkey = 0;
                send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
-               send_info.rkey = htbl->chunk->rkey;
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
 
                ret = dr_postsend_icm_data(dmn, &send_info);
                if (ret)
@@ -591,8 +592,9 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        send_info.write.length = action->rewrite->num_of_actions *
                                 DR_MODIFY_ACTION_SIZE;
        send_info.write.lkey = 0;
-       send_info.remote_addr = action->rewrite->chunk->mr_addr;
-       send_info.rkey = action->rewrite->chunk->rkey;
+       send_info.remote_addr =
+               mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
 
        ret = dr_postsend_icm_data(dmn, &send_info);
 
index 518e949..09ebd30 100644 (file)
@@ -25,6 +25,7 @@ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
 
 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
 {
+       u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        u8 masked[DR_STE_SIZE_TAG] = {};
        u32 crc32, index;
@@ -32,7 +33,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
        int i;
 
        /* Don't calculate CRC if the result is predicted */
-       if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+       if (num_entries == 1 || htbl->byte_mask == 0)
                return 0;
 
        /* Mask tag using byte mask, bit per byte */
@@ -45,7 +46,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
        }
 
        crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
-       index = crc32 & (htbl->chunk->num_of_entries - 1);
+       index = crc32 & (num_entries - 1);
 
        return index;
 }
@@ -96,13 +97,11 @@ void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
 }
 
 static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste *ste, u64 miss_addr)
+                                   u8 *hw_ste, u64 miss_addr)
 {
-       u8 *hw_ste_p = ste->hw_ste;
-
-       ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
-       ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
-       dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+       ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+       ste_ctx->set_miss_addr(hw_ste, miss_addr);
+       dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
 }
 
 void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -113,37 +112,45 @@ void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
 
 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+       return base_icm_addr + DR_STE_SIZE * index;
 }
 
 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+       return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+       u64 index = ste - ste->htbl->chunk->ste_arr;
+
+       return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
 }
 
 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
 {
-       u32 index = ste - ste->htbl->ste_arr;
+       u32 index = ste - ste->htbl->chunk->ste_arr;
 
-       return &ste->htbl->miss_list[index];
+       return &ste->htbl->chunk->miss_list[index];
 }
 
 static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
-                                  struct mlx5dr_ste *ste,
+                                  u8 *hw_ste,
                                   struct mlx5dr_ste_htbl *next_htbl)
 {
        struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
-       u8 *hw_ste = ste->hw_ste;
 
        ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
        ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
-       ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+       ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+                             mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
 
-       dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+       dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
 }
 
 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -166,7 +173,8 @@ bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
  */
 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
 {
-       memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+              DR_STE_SIZE_REDUCED);
        dst->next_htbl = src->next_htbl;
        if (dst->next_htbl)
                dst->next_htbl->pointing_ste = dst;
@@ -184,18 +192,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
                       struct mlx5dr_ste_htbl *stats_tbl)
 {
        u8 tmp_data_ste[DR_STE_SIZE] = {};
-       struct mlx5dr_ste tmp_ste = {};
        u64 miss_addr;
 
-       tmp_ste.hw_ste = tmp_data_ste;
+       miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
 
        /* Use temp ste because dr_ste_always_miss_addr
         * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
         */
-       memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
-       miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
-       dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
-       memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+       dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+       memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
 
        list_del_init(&ste->miss_list_node);
 
@@ -237,7 +244,7 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
        mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
 
        /* Copy all 64 hw_ste bytes */
-       memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
        sb_idx = ste->ste_chain_location - 1;
        mlx5dr_ste_set_bit_mask(hw_ste,
                                nic_matcher->ste_builder[sb_idx].bit_mask);
@@ -273,12 +280,13 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
        if (WARN_ON(!prev_ste))
                return;
 
-       miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
-       ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
+       miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+       ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
 
        mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
-                                                 prev_ste->hw_ste, ste_info,
-                                                 send_ste_list, true /* Copy data*/);
+                                                 mlx5dr_ste_get_hw_ste(prev_ste),
+                                                 ste_info, send_ste_list,
+                                                 true /* Copy data*/);
 
        list_del_init(&ste->miss_list_node);
 
@@ -364,9 +372,11 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
                                          u8 *hw_ste,
                                          struct mlx5dr_ste_htbl *next_htbl)
 {
-       struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+       u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+       u32 num_entries =
+               mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
 
-       ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+       ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
 }
 
 void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
@@ -385,15 +395,22 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
                                  struct mlx5dr_htbl_connect_info *connect_info)
 {
        bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
-       struct mlx5dr_ste ste = {};
+       u8 tmp_hw_ste[DR_STE_SIZE] = {0};
 
        ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
-       ste.hw_ste = formatted_ste;
 
+       /* Use temp ste because dr_ste_always_miss_addr/hit_htbl
+        * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
+        */
+       memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
        if (connect_info->type == CONNECT_HIT)
-               dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
+               dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+                                      connect_info->hit_next_htbl);
        else
-               dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
+               dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+                                       connect_info->miss_icm_addr);
+       memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
 }
 
 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -444,7 +461,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 
                /* Write new table to HW */
                info.type = CONNECT_MISS;
-               info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+               info.miss_icm_addr =
+                       mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
                if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
                                                      &info, false)) {
                        mlx5dr_info(dmn, "Failed writing table to HW\n");
@@ -470,6 +488,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 {
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste_htbl *htbl;
+       u32 num_entries;
        int i;
 
        htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
@@ -483,22 +502,18 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
        htbl->chunk = chunk;
        htbl->lu_type = lu_type;
        htbl->byte_mask = byte_mask;
-       htbl->ste_arr = chunk->ste_arr;
-       htbl->hw_ste_arr = chunk->hw_ste_arr;
-       htbl->miss_list = chunk->miss_list;
        htbl->refcount = 0;
+       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
 
-       for (i = 0; i < chunk->num_of_entries; i++) {
-               struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+       for (i = 0; i < num_entries; i++) {
+               struct mlx5dr_ste *ste = &chunk->ste_arr[i];
 
-               ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                ste->htbl = htbl;
                ste->refcount = 0;
                INIT_LIST_HEAD(&ste->miss_list_node);
-               INIT_LIST_HEAD(&htbl->miss_list[i]);
+               INIT_LIST_HEAD(&chunk->miss_list[i]);
        }
 
-       htbl->chunk_size = chunk_size;
        return htbl;
 
 out_free_htbl:
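
The dr_ste.c hunks above all follow the same pattern: values that used to be cached on the STE or hash table (the per-STE hw_ste pointer, the chunk's icm_addr, mr_addr and num_of_entries) are now recomputed from the ICM chunk on demand through the new mlx5dr_icm_pool_get_chunk_*() helpers. The real implementations live in dr_icm_pool.c, which is part of this series but not shown in this excerpt; the sketch below only illustrates the idea, and the buddy_mem sub-fields it dereferences are assumed names, not confirmed by the hunks here.

/* Illustrative sketch only -- not the dr_icm_pool.c code from this series.
 * The point: a chunk keeps just its size enum, its segment index and a
 * pointer to its buddy allocator; everything else is derived on the fly.
 */
u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
{
	/* chunk->size is the ICM chunk-size enum kept in the slimmed struct */
	return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
}

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	/* entry size depends on the ICM type; the pool/icm_mr field names
	 * below are assumptions made for this illustration
	 */
	u32 entry_size =
		mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr +
	       (u64)entry_size * chunk->seg;
}
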
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index f5f2d35..e5f6412 100644 (file)
@@ -10,6 +10,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
        struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
        struct mlx5dr_htbl_connect_info info;
        struct mlx5dr_ste_htbl *last_htbl;
+       struct mlx5dr_icm_chunk *chunk;
        int ret;
 
        if (!list_empty(&nic_tbl->nic_matcher_list))
@@ -22,13 +23,14 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
        else
                last_htbl = nic_tbl->s_anchor;
 
-       if (action)
-               nic_tbl->default_icm_addr =
-                       nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
-                               action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
-                               action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
-       else
+       if (action) {
+               chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+                       action->dest_tbl->tbl->rx.s_anchor->chunk :
+                       action->dest_tbl->tbl->tx.s_anchor->chunk;
+               nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+       } else {
                nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+       }
 
        info.type = CONNECT_MISS;
        info.miss_icm_addr = nic_tbl->default_icm_addr;
@@ -222,10 +224,10 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
        int ret;
 
        if (tbl->rx.s_anchor)
-               icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+               icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
 
        if (tbl->tx.s_anchor)
-               icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+               icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
 
        ft_attr.table_type = tbl->table_type;
        ft_attr.icm_addr_rx = icm_addr_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 88092fa..46866a5 100644 (file)
@@ -147,10 +147,12 @@ struct mlx5dr_matcher_rx_tx;
 struct mlx5dr_ste_ctx;
 
 struct mlx5dr_ste {
-       u8 *hw_ste;
        /* refcount: indicates the num of rules that using this ste */
        u32 refcount;
 
+       /* this ste is part of a rule, located in ste's chain */
+       u8 ste_chain_location;
+
        /* attached to the miss_list head at each htbl entry */
        struct list_head miss_list_node;
 
@@ -161,9 +163,6 @@ struct mlx5dr_ste {
 
        /* The rule this STE belongs to */
        struct mlx5dr_rule_rx_tx *rule_rx_tx;
-
-       /* this ste is part of a rule, located in ste's chain */
-       u8 ste_chain_location;
 };
 
 struct mlx5dr_ste_htbl_ctrl {
@@ -181,14 +180,7 @@ struct mlx5dr_ste_htbl {
        u16 byte_mask;
        u32 refcount;
        struct mlx5dr_icm_chunk *chunk;
-       struct mlx5dr_ste *ste_arr;
-       u8 *hw_ste_arr;
-
-       struct list_head *miss_list;
-
-       enum mlx5dr_icm_chunk_size chunk_size;
        struct mlx5dr_ste *pointing_ste;
-
        struct mlx5dr_ste_htbl_ctrl ctrl;
 };
 
@@ -1097,16 +1089,12 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
 struct mlx5dr_icm_chunk {
        struct mlx5dr_icm_buddy_mem *buddy_mem;
        struct list_head chunk_list;
-       u32 rkey;
-       u32 num_of_entries;
-       u32 byte_size;
-       u64 icm_addr;
-       u64 mr_addr;
 
        /* indicates the index of this chunk in the whole memory,
         * used for deleting the chunk from the buddy
         */
        unsigned int seg;
+       enum mlx5dr_icm_chunk_size size;
 
        /* Memory optimisation */
        struct mlx5dr_ste *ste_arr;
@@ -1146,6 +1134,13 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
                                   enum mlx5dr_ipv outer_ipv,
                                   enum mlx5dr_ipv inner_ipv);
 
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
 static inline int
 mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
 {
@@ -1178,7 +1173,7 @@ static inline int
 mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
 {
        int num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
 
        /* Threshold is 50%, one is added to table of size 1 */
        return (num_of_entries + 1) / 2;
@@ -1187,7 +1182,7 @@ mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
 static inline bool
 mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
 {
-       if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+       if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
                return false;
 
        return true;
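
In dr_types.h the memory saving comes from two directions: struct mlx5dr_icm_chunk and struct mlx5dr_ste_htbl lose fields that can be recomputed (handled by the helpers declared above), and struct mlx5dr_ste both drops its hw_ste pointer and moves the u8 ste_chain_location up next to the u32 refcount, so the byte sits in an existing alignment hole instead of forcing tail padding. Below is a small, self-contained userspace sketch of that packing effect; the structs are simplified stand-ins for illustration (field set, names and the LP64 layout are assumptions), not the real mlx5dr_ste definition.

#include <stdio.h>

/* stand-in for struct list_head */
struct stub_list_head { void *next, *prev; };

struct ste_before {                       /* old layout */
	void *hw_ste;                     /* pointer dropped by this series */
	unsigned int refcount;            /* 4 bytes + 4 bytes of padding   */
	struct stub_list_head miss_list_node;
	void *htbl;
	void *next_htbl;
	void *rule_rx_tx;
	unsigned char ste_chain_location; /* trailing u8, padded out to 8   */
};

struct ste_after {                        /* new layout */
	unsigned int refcount;
	unsigned char ste_chain_location; /* fills the hole after refcount  */
	struct stub_list_head miss_list_node;
	void *htbl;
	void *next_htbl;
	void *rule_rx_tx;
};

int main(void)
{
	/* prints 64 vs 48 bytes on a typical LP64 build */
	printf("before: %zu bytes, after: %zu bytes per STE\n",
	       sizeof(struct ste_before), sizeof(struct ste_after));
	return 0;
}

The same idea drives the per-chunk arrays: mlx5dr_ste_htbl no longer duplicates the ste_arr/hw_ste_arr/miss_list pointers that are already reachable through htbl->chunk.
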
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 00a914b..96cd740 100644 (file)
@@ -1009,9 +1009,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
-                  int size, struct mlx5_frag_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node);
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1039,7 +1036,6 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);