
net/mlx5e: XDP, Add support for multi-buffer XDP redirect-in
author    Tariq Toukan <tariqt@nvidia.com>
          Mon, 17 Apr 2023 12:18:54 +0000 (15:18 +0300)
committer David S. Miller <davem@davemloft.net>
          Wed, 19 Apr 2023 07:59:26 +0000 (08:59 +0100)
Handle multi-buffer XDP redirect-in requests coming through
mlx5e_xdp_xmit.
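
For context, a multi-buffer frame arriving at ndo_xdp_xmit carries its
fragments in a skb_shared_info area at the tail of the frame's memory. A
minimal sketch of how such a frame is walked, using only the generic helpers
from include/net/xdp.h (the function name walk_mb_frame is hypothetical,
for illustration only):

#include <net/xdp.h>

static void walk_mb_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (!xdp_frame_has_frags(xdpf))
		return;

	/* the shared info lives at the tail of the frame's memory area */
	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		/* each fragment contributes skb_frag_size(frag) bytes at
		 * skb_frag_address(frag), on top of the xdpf->len bytes of
		 * linear data at xdpf->data
		 */
	}
}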

Extend struct mlx5e_xmit_data_frags with an additional dma_arr field that
points to the fragments' DMA mappings, as they cannot be retrieved via the
page_pool_get_dma_addr() function.
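
The resulting layout (shown in the txrx.h hunk below): dma_arr[i] holds the
dma_map_single() address of fragment i for redirected frames, and stays NULL
for local XDP_TX traffic, whose fragment pages are still owned by the RQ's
page pool:

struct mlx5e_xmit_data_frags {
	struct mlx5e_xmit_data xd;
	struct skb_shared_info *sinfo;
	dma_addr_t *dma_arr;	/* NULL unless the frame came in via ndo_xdp_xmit */
};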

Push a dma_addr xdpi instance for each fragment, and use these entries in
the completion flow to dma_unmap the frags.
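
The pairing between transmit and completion relies on a fixed xdpi FIFO
layout per frame (push order in mlx5e_xdp_xmit, pop order in
mlx5e_free_xdpsq_desc), sketched here as a comment:

/* xdpi FIFO entries for one redirected frame with nr_frags fragments:
 *
 *   { .frame.xdpf     = xdpf }                popped first on completion
 *   { .frame.dma_addr = <linear part> }       unmapped with xdpf->len
 *   { .frame.dma_addr = <frag 0> }            unmapped with skb_frag_size()
 *   ...
 *   { .frame.dma_addr = <frag nr_frags - 1> }
 */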

Finally, remove the restriction in mlx5e_open_xdpsq, and advertise the
NETDEV_XDP_ACT_NDO_XMIT_SG flag in xdp_features.
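
Advertising NETDEV_XDP_ACT_NDO_XMIT_SG is what makes the core hand
multi-buffer frames to this driver at all. A simplified paraphrase of the
devmap-side gate (based on kernel/bpf/devmap.c in the v6.3 timeframe; the
function name dst_can_take_frame is illustrative, not the upstream one, and
the exact upstream code may differ):

#include <linux/netdevice.h>
#include <net/xdp.h>

static bool dst_can_take_frame(struct net_device *dev, struct xdp_frame *xdpf)
{
	if (!dev->netdev_ops->ndo_xdp_xmit)
		return false;

	/* multi-buffer frames are refused for redirect targets that do
	 * not advertise NETDEV_XDP_ACT_NDO_XMIT_SG
	 */
	if (xdp_frame_has_frags(xdpf) &&
	    !(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG))
		return false;

	return true;
}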

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 1302f52..47381e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -87,6 +87,7 @@ struct mlx5e_xmit_data {
 struct mlx5e_xmit_data_frags {
        struct mlx5e_xmit_data xd;
        struct skb_shared_info *sinfo;
+       dma_addr_t *dma_arr;
 };
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 5dab901..c266d07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -126,6 +126,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
        if (xdptxd->has_frags) {
                xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+               xdptxdf.dma_arr = NULL;
 
                for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
                        skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
@@ -548,7 +549,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                        skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
                        dma_addr_t addr;
 
-                       addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+                       addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+                               page_pool_get_dma_addr(skb_frag_page(frag)) +
                                skb_frag_off(frag);
 
                        dseg++;
@@ -601,6 +603,21 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 
                        dma_unmap_single(sq->pdev, dma_addr,
                                         xdpf->len, DMA_TO_DEVICE);
+                       if (xdp_frame_has_frags(xdpf)) {
+                               struct skb_shared_info *sinfo;
+                               int j;
+
+                               sinfo = xdp_get_shared_info_from_frame(xdpf);
+                               for (j = 0; j < sinfo->nr_frags; j++) {
+                                       skb_frag_t *frag = &sinfo->frags[j];
+
+                                       xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+                                       dma_addr = xdpi.frame.dma_addr;
+
+                                       dma_unmap_single(sq->pdev, dma_addr,
+                                                        skb_frag_size(frag), DMA_TO_DEVICE);
+                               }
+                       }
                        xdp_return_frame_bulk(xdpf, bq);
                        break;
                }
@@ -759,23 +776,57 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        sq = &priv->channels.c[sq_num]->xdpsq;
 
        for (i = 0; i < n; i++) {
+               struct mlx5e_xmit_data_frags xdptxdf = {};
                struct xdp_frame *xdpf = frames[i];
-               struct mlx5e_xmit_data xdptxd = {};
+               dma_addr_t dma_arr[MAX_SKB_FRAGS];
+               struct mlx5e_xmit_data *xdptxd;
                bool ret;
 
-               xdptxd.data = xdpf->data;
-               xdptxd.len = xdpf->len;
-               xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
-                                                xdptxd.len, DMA_TO_DEVICE);
+               xdptxd = &xdptxdf.xd;
+               xdptxd->data = xdpf->data;
+               xdptxd->len = xdpf->len;
+               xdptxd->has_frags = xdp_frame_has_frags(xdpf);
+               xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
+                                                 xdptxd->len, DMA_TO_DEVICE);
 
-               if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+               if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
                        break;
 
+               if (xdptxd->has_frags) {
+                       int j;
+
+                       xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+                       xdptxdf.dma_arr = dma_arr;
+                       for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
+                               skb_frag_t *frag = &xdptxdf.sinfo->frags[j];
+
+                               dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
+                                                           skb_frag_size(frag), DMA_TO_DEVICE);
+
+                               if (!dma_mapping_error(sq->pdev, dma_arr[j]))
+                                       continue;
+                               /* mapping error */
+                               while (--j >= 0)
+                                       dma_unmap_single(sq->pdev, dma_arr[j],
+                                                        skb_frag_size(&xdptxdf.sinfo->frags[j]),
+                                                        DMA_TO_DEVICE);
+                               goto out;
+                       }
+               }
+
                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
+                                     mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
                if (unlikely(!ret)) {
-                       dma_unmap_single(sq->pdev, xdptxd.dma_addr,
-                                        xdptxd.len, DMA_TO_DEVICE);
+                       int j;
+
+                       dma_unmap_single(sq->pdev, xdptxd->dma_addr,
+                                        xdptxd->len, DMA_TO_DEVICE);
+                       if (!xdptxd->has_frags)
+                               break;
+                       for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+                               dma_unmap_single(sq->pdev, dma_arr[j],
+                                                skb_frag_size(&xdptxdf.sinfo->frags[j]),
+                                                DMA_TO_DEVICE);
                        break;
                }
 
@@ -785,10 +836,19 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
-                                    (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd.dma_addr });
+                                    (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
+               if (xdptxd->has_frags) {
+                       int j;
+
+                       for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+                               mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+                                                    (union mlx5e_xdp_info)
+                                                    { .frame.dma_addr = dma_arr[j] });
+               }
                nxmit++;
        }
 
+out:
        if (flags & XDP_XMIT_FLUSH) {
                if (sq->mpwqe.wqe)
                        mlx5e_xdp_mpwqe_complete(sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0b5aafa..ccf7bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1862,11 +1862,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-       /* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
-        * supported by upstream, and there is no defined trigger to allow
-        * transmitting redirected multi-buffer frames.
-        */
-       if (param->is_xdp_mb && !is_redirect)
+       if (param->is_xdp_mb)
                set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
 
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -4068,7 +4064,8 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
 
        val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
              NETDEV_XDP_ACT_XSK_ZEROCOPY |
-             NETDEV_XDP_ACT_NDO_XMIT;
+             NETDEV_XDP_ACT_NDO_XMIT |
+             NETDEV_XDP_ACT_NDO_XMIT_SG;
        if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
                val |= NETDEV_XDP_ACT_RX_SG;
        xdp_set_features_flag(netdev, val);
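
A redirecting BPF program can exercise this path once the flag is advertised.
A minimal sketch (map layout and program name are illustrative), loaded from
the xdp.frags section so libbpf sets BPF_F_XDP_HAS_FRAGS and the program may
receive multi-buffer packets:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp.frags")
int redirect_all(struct xdp_md *ctx)
{
	/* slot 0 is populated from user space with the egress ifindex;
	 * frames then arrive at the target driver's ndo_xdp_xmit,
	 * i.e. mlx5e_xdp_xmit for mlx5e devices
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";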