i40e: Add a stat for tracking busy rx pages
author Joe Damato <jdamato@fastly.com>
	Fri, 17 Dec 2021 19:35:19 +0000 (11:35 -0800)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
	Tue, 8 Feb 2022 16:21:52 +0000 (08:21 -0800)
In some cases, a page cannot be reused by i40e because the page is busy. Add
a counter for this event.

Busy page count is accessible via ethtool.
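
For example, on a host with an i40e-backed interface named eth0 (a
hypothetical name), running "ethtool -S eth0" lists the new counter as
"rx_cache_busy", alongside the existing rx_cache_reuse, rx_cache_alloc,
and rx_cache_waive stats added by earlier patches in this series.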

Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Dave Switzer <david.switzer@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h

drivers/net/ethernet/intel/i40e/i40e.h
index d2e71db..55c6bce 100644
@@ -857,6 +857,7 @@ struct i40e_vsi {
        u64 rx_page_reuse;
        u64 rx_page_alloc;
        u64 rx_page_waive;
+       u64 rx_page_busy;
 
        /* These are containers of ring pointers, allocated at run-time */
        struct i40e_ring **rx_rings;
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7e76cd0..e484996 100644
@@ -298,6 +298,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
        I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
        I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+       I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
drivers/net/ethernet/intel/i40e/i40e_main.c
index 385b241..9b7ce6d 100644
@@ -773,7 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
  **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
-       u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive;
+       u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
        struct i40e_pf *pf = vsi->back;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
@@ -809,6 +809,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rx_reuse = 0;
        rx_alloc = 0;
        rx_waive = 0;
+       rx_busy = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
@@ -845,6 +846,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                rx_reuse += p->rx_stats.page_reuse_count;
                rx_alloc += p->rx_stats.page_alloc_count;
                rx_waive += p->rx_stats.page_waive_count;
+               rx_busy += p->rx_stats.page_busy_count;
 
                if (i40e_enabled_xdp_vsi(vsi)) {
                        /* locate XDP ring */
@@ -875,6 +877,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        vsi->rx_page_reuse = rx_reuse;
        vsi->rx_page_alloc = rx_alloc;
        vsi->rx_page_waive = rx_waive;
+       vsi->rx_page_busy = rx_busy;
 
        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3d91b16..a628f4b 100644
@@ -1990,8 +1990,8 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed.
  *
- * rx_stats will be updated to indicate if the page was waived because it was
- * not reusable.
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
  */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
                                   struct i40e_rx_queue_stats *rx_stats,
@@ -2008,13 +2008,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+               rx_stats->page_busy_count++;
                return false;
+       }
 #else
 #define I40E_LAST_OFFSET \
        (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-       if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+       if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+               rx_stats->page_busy_count++;
                return false;
+       }
 #endif
 
        /* If we have drained the page fragment pool we need to update
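
For reference, a minimal standalone sketch of the PAGE_SIZE < 8192 test
above (illustrative only; rx_page_is_busy, refcount, and bias are made-up
names, not driver code). In this scheme the driver pre-charges a bias onto
the page's reference count so it can hand out fragments without touching
the refcount each time; any reference beyond that bias plus the driver's
own single live use means another owner, typically the network stack,
still holds the page, so it cannot be recycled and is now counted as busy.

#include <stdbool.h>

/* refcount is the page's current reference count; bias is the amount
 * the driver itself pre-charged onto the page. More than one non-bias
 * reference means someone else still owns part of the page.
 */
static bool rx_page_is_busy(unsigned int refcount, unsigned int bias)
{
	return (refcount - bias) > 1;
}
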
drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1955d3..324699e 100644
@@ -300,6 +300,7 @@ struct i40e_rx_queue_stats {
        u64 page_reuse_count;
        u64 page_alloc_count;
        u64 page_waive_count;
+       u64 page_busy_count;
 };
 
 enum i40e_ring_state_t {