
xsk: Return the whole xdp_desc from xsk_umem_consume_tx
author    Maxim Mikityanskiy <maximmi@mellanox.com>    Wed, 26 Jun 2019 14:35:28 +0000 (17:35 +0300)
committer Daniel Borkmann <daniel@iogearbox.net>       Thu, 27 Jun 2019 20:53:27 +0000 (22:53 +0200)

Some drivers want to access the transmitted data in order to implement
acceleration features of the NICs. It is also useful in the AF_XDP TX flow.

Change the xsk_umem_consume_tx API to return the whole xdp_desc, which
contains the data pointer, length and DMA address, instead of only the
latter two. Adapt the implementations of i40e and ixgbe to this change.

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@intel.com>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
include/net/xdp_sock.h
net/xdp/xsk.c

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 557c565..32bad01 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -641,8 +641,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
        struct i40e_tx_desc *tx_desc = NULL;
        struct i40e_tx_buffer *tx_bi;
        bool work_done = true;
+       struct xdp_desc desc;
        dma_addr_t dma;
-       u32 len;
 
        while (budget-- > 0) {
                if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
@@ -651,21 +651,23 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
                        break;
                }
 
-               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
                        break;
 
-               dma_sync_single_for_device(xdp_ring->dev, dma, len,
+               dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+               dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
                                           DMA_BIDIRECTIONAL);
 
                tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
-               tx_bi->bytecount = len;
+               tx_bi->bytecount = desc.len;
 
                tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->buffer_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(I40E_TX_DESC_CMD_ICRC
                                   | I40E_TX_DESC_CMD_EOP,
-                                  0, len, 0);
+                                  0, desc.len, 0);
 
                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 6af55bb..6b60955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -571,8 +571,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        struct ixgbe_tx_buffer *tx_bi;
        bool work_done = true;
-       u32 len, cmd_type;
+       struct xdp_desc desc;
        dma_addr_t dma;
+       u32 cmd_type;
 
        while (budget-- > 0) {
                if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
@@ -581,14 +582,16 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
                        break;
                }
 
-               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
                        break;
 
-               dma_sync_single_for_device(xdp_ring->dev, dma, len,
+               dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+               dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
                                           DMA_BIDIRECTIONAL);
 
                tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
-               tx_bi->bytecount = len;
+               tx_bi->bytecount = desc.len;
                tx_bi->xdpf = NULL;
                tx_bi->gso_segs = 1;
 
@@ -599,10 +602,10 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
                cmd_type = IXGBE_ADVTXD_DTYP_DATA |
                           IXGBE_ADVTXD_DCMD_DEXT |
                           IXGBE_ADVTXD_DCMD_IFCS;
-               cmd_type |= len | IXGBE_TXD_CMD;
+               cmd_type |= desc.len | IXGBE_TXD_CMD;
                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
                tx_desc->read.olinfo_status =
-                       cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+                       cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index b6f5eba..057b159 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -81,7 +81,7 @@ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
 u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
 void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
 struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
 struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@ -175,8 +175,8 @@ static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
 {
 }
 
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
-                                      u32 *len)
+static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
+                                      struct xdp_desc *desc)
 {
        return false;
 }
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 35ca531..74417a8 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -172,22 +172,18 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx_done);
 
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 {
-       struct xdp_desc desc;
        struct xdp_sock *xs;
 
        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-               if (!xskq_peek_desc(xs->tx, &desc))
+               if (!xskq_peek_desc(xs->tx, desc))
                        continue;
 
-               if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+               if (xskq_produce_addr_lazy(umem->cq, desc->addr))
                        goto out;
 
-               *dma = xdp_umem_get_dma(umem, desc.addr);
-               *len = desc.len;
-
                xskq_discard_desc(xs->tx);
                rcu_read_unlock();
                return true;