net/mlx5: Use generic definition for UMR KLM alignment
Author:     Tariq Toukan <tariqt@nvidia.com>
AuthorDate: Mon, 31 Oct 2022 12:24:02 +0000 (14:24 +0200)
Commit:     Saeed Mahameed <saeedm@nvidia.com>
CommitDate: Wed, 30 Nov 2022 05:09:44 +0000 (21:09 -0800)

MLX5_UMR_KLM_ALIGNMENT is in units of number of entries, while
MLX5_UMR_MTT_ALIGNMENT (generalized and renamed to
MLX5_UMR_FLEX_ALIGNMENT) is in byte units. This is misleading and
confusing.
Replace this KLM definition with one based on the generic definition.

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
include/linux/mlx5/device.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3cad59a..65790ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -160,7 +160,7 @@ struct page_pool;
        (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
 
 #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
-       ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+       ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
 
 #define MLX5E_MAX_KLM_PER_WQE(mdev) \
        MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
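To make the entries-per-WQE arithmetic above concrete, here is a minimal standalone C sketch. The 16-byte KLM entry size and the 4-entry alignment match the driver; UMR_WQE_HDR_SZ is a hypothetical stand-in for sizeof(struct mlx5e_umr_wqe), and the simplified ALIGN_DOWN assumes a power-of-two alignment:

/* Standalone sketch of MLX5E_KLM_ENTRIES_PER_WQE, not the kernel header. */
#include <stdio.h>

#define KLM_SZ                    16  /* sizeof(struct mlx5_klm) */
#define UMR_WQE_HDR_SZ            48  /* hypothetical stand-in value */
#define KLM_NUM_ENTRIES_ALIGNMENT 4   /* 0x40 / 16, see the device.h hunk below */

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))  /* a must be a power of two */

#define KLM_MAX_ENTRIES_PER_WQE(wqe_size) \
	(((wqe_size) - UMR_WQE_HDR_SZ) / KLM_SZ)
#define KLM_ENTRIES_PER_WQE(wqe_size) \
	ALIGN_DOWN(KLM_MAX_ENTRIES_PER_WQE(wqe_size), KLM_NUM_ENTRIES_ALIGNMENT)

int main(void)
{
	/* A 256-byte WQE fits (256 - 48) / 16 = 13 KLM entries; rounding
	 * down to the 4-entry alignment leaves 12 usable entries. */
	printf("%d\n", KLM_ENTRIES_PER_WQE(256)); /* prints 12 */
	return 0;
}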
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8d71736..c8820ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -593,8 +593,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        int headroom, i;
 
        headroom = rq->buff.headroom;
-       new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
-       entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+       new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+       entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
        pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
        umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -603,7 +603,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        for (i = 0; i < entries; i++, index++) {
                dma_info = &shampo->info[index];
                if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
-                                        MLX5_UMR_KLM_ALIGNMENT))
+                                        MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
                        goto update_klm;
                header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
                        MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
@@ -668,8 +668,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
        if (!klm_entries)
                return 0;
 
-       klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
-       index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+       klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+       index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
        entries_before = shampo->hd_per_wq - index;
 
        if (unlikely(entries_before < klm_entries))
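The producer-index math in the hunks above can be checked in isolation. Below is a minimal userspace sketch, assuming a power-of-two alignment; ALIGN and ALIGN_DOWN are simplified equivalents of the kernel macros for that case, and pi and klm_entries are made-up values:

/* Userspace sketch of the SHAMPO index rounding, not driver code. */
#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))          /* round down to a */
#define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a)) /* round up to a */

int main(void)
{
	unsigned int align = 4;        /* MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT */
	unsigned int pi = 13;          /* hypothetical SHAMPO producer index */
	unsigned int klm_entries = 10; /* hypothetical request size */

	/* Extend the request backward so it starts on an aligned index... */
	klm_entries += pi & (align - 1);            /* 10 + 1 == 11 */
	unsigned int index = ALIGN_DOWN(pi, align); /* 13 -> 12 */
	/* ...and round the posted entry count up to the alignment. */
	unsigned int entries = ALIGN(klm_entries, align); /* 11 -> 12 */

	printf("index=%u entries=%u\n", index, entries);
	assert(index % align == 0 && entries % align == 0);
	return 0;
}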
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a02f779..5fe5d19 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -290,9 +290,9 @@ enum {
        MLX5_UMR_INLINE                 = (1 << 7),
 };
 
-#define MLX5_UMR_KLM_ALIGNMENT 4
 #define MLX5_UMR_FLEX_ALIGNMENT 0x40
 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
 
 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
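As a sanity check on the new definition, a standalone sketch of the derivation follows. The struct layouts are re-declared here only for illustration (a 16-byte KLM and an 8-byte MTT, matching the usual mlx5 definitions); this is not the kernel header itself:

/* Standalone sketch of the derived alignments, not the kernel header. */
#include <assert.h>
#include <stdint.h>

struct mlx5_klm { uint32_t bcount; uint32_t key; uint64_t va; }; /* 16 bytes */
struct mlx5_mtt { uint64_t ptag; };                              /*  8 bytes */

#define MLX5_UMR_FLEX_ALIGNMENT 0x40 /* bytes */
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT \
	(MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT \
	(MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))

int main(void)
{
	/* 0x40 / 16 == 4 entries; 0x40 / 8 == 8 entries. */
	assert(MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT == 4);
	assert(MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT == 8);
	return 0;
}

With the 0x40-byte flex alignment, the derived KLM value is the same 4 entries the old MLX5_UMR_KLM_ALIGNMENT hard-coded, so the change only expresses the constant in terms of the generic byte alignment rather than altering behavior.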