net/mlx5: DR, Warn and ignore SW steering rule insertion on QP err
author     Yevgeny Kliteynik <kliteyn@nvidia.com>
           Sun, 4 Jul 2021 08:57:38 +0000 (11:57 +0300)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Thu, 26 Aug 2021 22:38:01 +0000 (15:38 -0700)
In the event of the SW steering QP entering an error state, SW steering
cannot insert any more rules; it issues a one-time warning and then
silently ignores further rule insertions.
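
To illustrate the latch-and-skip behavior, here is a minimal standalone
sketch in plain userspace C (not the driver code; names such as poll_cq()
and post_icm_data() are simplified stand-ins for dr_poll_cq() and
dr_postsend_icm_data()): a failed completion poll latches err_state on the
send ring, and every later post is skipped but reported as success, so
rule insertion is silently ignored.

	#include <stdbool.h>
	#include <stdio.h>

	struct send_ring {
		bool err_state;		/* set once the QP/CQ hit an error */
	};

	/* Stand-in for dr_poll_cq(); a negative return means CQ/QP error. */
	static int poll_cq(void)
	{
		return -1;	/* simulate the QP moving to error state */
	}

	static int handle_pending_wc(struct send_ring *ring)
	{
		int ne = poll_cq();

		if (ne < 0) {
			fprintf(stderr, "warn once: send QP is disabled/limited\n");
			ring->err_state = true;	/* remember the failure */
			return ne;
		}
		return 0;
	}

	static int post_icm_data(struct send_ring *ring)
	{
		/* Once the ring is unusable, skip the post entirely and
		 * return success so the insertion is silently ignored. */
		if (ring->err_state)
			return 0;

		return handle_pending_wc(ring);
	}

	int main(void)
	{
		struct send_ring ring = { .err_state = false };

		printf("first post: %d\n", post_icm_data(&ring));  /* hits the error */
		printf("second post: %d\n", post_icm_data(&ring)); /* silently skipped */
		return 0;
	}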

Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 24f40e1..bfb14b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
 
        do {
                ne = dr_poll_cq(send_ring->cq, 1);
-               if (ne < 0)
+               if (unlikely(ne < 0)) {
+                       mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
+                                           send_ring->qp->qpn);
+                       send_ring->err_state = true;
                        return ne;
-               else if (ne == 1)
+               } else if (ne == 1) {
                        send_ring->pending_wqe -= send_ring->signal_th;
+               }
        } while (is_drain && send_ring->pending_wqe);
 
        return 0;
@@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
        u32 buff_offset;
        int ret;
 
+       if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+                    send_ring->err_state)) {
+               mlx5_core_dbg_once(dmn->mdev,
+                                  "Skipping post send: QP err state: %d, device state: %d\n",
+                                  send_ring->err_state, dmn->mdev->state);
+               return 0;
+       }
+
        spin_lock(&send_ring->lock);
 
        ret = dr_handle_pending_wc(dmn, send_ring);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 474cf32..4fd14e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1285,6 +1285,7 @@ struct mlx5dr_send_ring {
        u8 sync_buff[MIN_READ_SYNC];
        struct mlx5dr_mr *sync_mr;
        spinlock_t lock; /* Protect the data path of the send ring */
+       bool err_state; /* send_ring is not usable in err state */
 };
 
 int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);