 	u64_stats_update_end(syncp);
 }
 
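+/* Ring the device doorbell for a TX/XDP submission queue and account
+ * the write in the ring's doorbell statistics.
+ */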
+static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
+{
+	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
+}
+
 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
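For context, the stats helper the new function reuses is the one whose tail
opens this excerpt. A minimal sketch of it, as it appears in the surrounding
ena_netdev.c code (shown here for readability, not part of this patch):

	static void ena_increase_stat(u64 *statp, u64 cnt,
				      struct u64_stats_sync *syncp)
	{
		u64_stats_update_begin(syncp);
		(*statp) += cnt;
		u64_stats_update_end(syncp);
	}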
 		netif_dbg(adapter, tx_queued, dev,
 			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
 			  ring->qid);
-		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+		ena_ring_tx_doorbell(ring);
 	}
 
 	/* prepare the packet's descriptors to dma engine */
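This hunk sits in ena_xmit_common(): in LLQ mode the device caps how many
descriptors may be pushed in a single burst, so the driver rings the doorbell
early once the cap is reached. Roughly, with the guard condition reconstructed
from the dbg message above (an assumption, the condition is not shown in the
hunk):

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						&ena_tx_ctx))) {
		netif_dbg(...);		/* the message shown above */
		ena_ring_tx_doorbell(ring);
	}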
 			     xdpf->len);
 	if (rc)
 		goto error_unmap_dma;
-	/* trigger the dma engine. ena_com_write_sq_doorbell()
-	 * has a mb
+
+	/* trigger the dma engine. ena_ring_tx_doorbell()
+	 * calls a memory barrier inside it.
 	 */
-	if (flags & XDP_XMIT_FLUSH) {
-		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
-		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
-				  &xdp_ring->syncp);
-	}
+	if (flags & XDP_XMIT_FLUSH)
+		ena_ring_tx_doorbell(xdp_ring);
 
 	return rc;
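ena_xdp_xmit_frame() leaves the flush decision to its caller: the batched
ndo_xdp_xmit path in the next hunk rings the doorbell once at the end of the
batch, implying per-frame calls pass no flush flag, while the XDP_TX path
presumably sends a single rewritten frame and flushes it at once. A
hypothetical sketch of that latter call (the call site is not part of this
excerpt):

	/* XDP_TX: send one rewritten frame and ring the doorbell now */
	ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);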
 	}
 
 	/* Ring doorbell to make device aware of the packets */
-	if (flags & XDP_XMIT_FLUSH) {
-		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
-		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
-				  &xdp_ring->syncp);
-	}
+	if (flags & XDP_XMIT_FLUSH)
+		ena_ring_tx_doorbell(xdp_ring);
 
 	spin_unlock(&xdp_ring->xdp_tx_lock);
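In this batched path the doorbell is rung at most once per ndo_xdp_xmit call,
and it happens before xdp_tx_lock is dropped, so the tail write and the
doorbells counter stay consistent with the frames queued under the same lock.
Condensed from the hunk above, assuming the matching spin_lock earlier in the
function:

	spin_lock(&xdp_ring->xdp_tx_lock);
	/* ... queue up to n frames on xdp_ring ... */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);
	spin_unlock(&xdp_ring->xdp_tx_lock);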
 		}
 	}
 
-	if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
-		/* trigger the dma engine. ena_com_write_sq_doorbell()
-		 * has a mb
+	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+		/* trigger the dma engine. ena_ring_tx_doorbell()
+		 * calls a memory barrier inside it.
 		 */
-		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
-		ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
-				  &tx_ring->syncp);
-	}
+		ena_ring_tx_doorbell(tx_ring);
 
 	return NETDEV_TX_OK;
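The guard in this last hunk is the usual xmit_more doorbell-coalescing
pattern; restated as a sketch:

	/* Doorbell coalescing via xmit_more:
	 * - netdev_xmit_more(): the stack promises another skb for this txq
	 *   right away, so the (relatively expensive) doorbell MMIO write
	 *   can be deferred and issued once for the whole batch.
	 * - netif_xmit_stopped(txq): the queue was stopped, no further xmit
	 *   will follow, so the doorbell must be rung now or the queued
	 *   packets would sit until some later event.
	 */
	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		ena_ring_tx_doorbell(tx_ring);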