
RDMA/hns: Encapsulate flushing CQE as a function
author     Wenpeng Liang <liangwenpeng@huawei.com>
           Fri, 18 Jun 2021 10:10:18 +0000 (18:10 +0800)
committer  Jason Gunthorpe <jgg@nvidia.com>
           Tue, 22 Jun 2021 18:17:07 +0000 (15:17 -0300)
The process of flushing CQE can be encapsulated into a function, which
reduces duplicate code.

Link: https://lore.kernel.org/r/1624011020-16992-9-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_qp.c

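The shape of the refactor, condensed from the diff below (a summary of the
patch, not additional code in it): four call sites that detected an errored
QP repeated the same guarded two-line sequence, which now lives in one helper.

	/* Before: repeated at four call sites. */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(hr_dev, qp);

	/* After: each call site reduces to... */
	flush_cqe(hr_dev, qp);

	/* ...with the sequence centralized in hns_roce_qp.c: */
	void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
	{
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(dev, qp);
	}
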
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cb01b25..a2a256f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1252,6 +1252,7 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 6bcdd89..f6ace90 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -624,18 +624,8 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
 {
-       /*
-        * Hip08 hardware cannot flush the WQEs in SQ if the QP state
-        * gets into errored mode. Hence, as a workaround to this
-        * hardware limitation, driver needs to assist in flushing. But
-        * the flushing operation uses mailbox to convey the QP state to
-        * the hardware and which can sleep due to the mutex protection
-        * around the mailbox calls. Hence, use the deferred flush for
-        * now.
-        */
        if (unlikely(qp->state == IB_QPS_ERR)) {
-               if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                       init_flush_work(hr_dev, qp);
+               flush_cqe(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};
 
@@ -651,18 +641,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
 static inline void update_rq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
 {
-       /*
-        * Hip08 hardware cannot flush the WQEs in RQ if the QP state
-        * gets into errored mode. Hence, as a workaround to this
-        * hardware limitation, driver needs to assist in flushing. But
-        * the flushing operation uses mailbox to convey the QP state to
-        * the hardware and which can sleep due to the mutex protection
-        * around the mailbox calls. Hence, use the deferred flush for
-        * now.
-        */
        if (unlikely(qp->state == IB_QPS_ERR)) {
-               if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                       init_flush_work(hr_dev, qp);
+               flush_cqe(hr_dev, qp);
        } else {
                if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
                        *qp->rdb.db_record =
@@ -3553,17 +3533,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
        if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
                return;
 
-       /*
-        * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
-        * into errored mode. Hence, as a workaround to this hardware
-        * limitation, driver needs to assist in flushing. But the flushing
-        * operation uses mailbox to convey the QP state to the hardware and
-        * which can sleep due to the mutex protection around the mailbox calls.
-        * Hence, use the deferred flush for now. Once wc error detected, the
-        * flushing operation is needed.
-        */
-       if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-               init_flush_work(hr_dev, qp);
+       flush_cqe(hr_dev, qp);
 }
 
 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 090b143..b101b7e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -79,6 +79,21 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
        queue_work(hr_dev->irq_workq, &flush_work->work);
 }
 
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+{
+       /*
+        * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
+        * gets into errored mode. Hence, as a workaround to this
+        * hardware limitation, driver needs to assist in flushing. But
+        * the flushing operation uses mailbox to convey the QP state to
+        * the hardware and which can sleep due to the mutex protection
+        * around the mailbox calls. Hence, use the deferred flush for
+        * now.
+        */
+       if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+               init_flush_work(dev, qp);
+}
+
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
        struct device *dev = hr_dev->dev;
@@ -102,8 +117,8 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
             event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
             event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
                qp->state = IB_QPS_ERR;
-               if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                       init_flush_work(hr_dev, qp);
+
+               flush_cqe(hr_dev, qp);
        }
 
        qp->event(qp, (enum hns_roce_event)event_type);
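
For readers unfamiliar with the idiom the moved comment describes: the
mailbox operation that conveys the QP state to the hardware can sleep, so it
cannot run in the (possibly atomic) contexts that detect the error. Instead,
the first detector queues deferred work, and an atomic test-and-set bit keeps
the work from being queued twice. Below is a self-contained sketch of that
pattern, with hypothetical names (my_qp, my_flush_worker, MY_FLUSH_FLAG); the
real driver queues onto hr_dev->irq_workq via init_flush_work().

	#include <linux/kernel.h>
	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	#define MY_FLUSH_FLAG 0

	struct my_qp {
		unsigned long flush_flag;	/* bit MY_FLUSH_FLAG guards queuing */
		struct work_struct flush_work;	/* INIT_WORK()'d at QP creation */
	};

	/* Runs from a workqueue in process context, so sleeping is allowed. */
	static void my_flush_worker(struct work_struct *work)
	{
		struct my_qp *qp = container_of(work, struct my_qp, flush_work);

		/* A sleeping mailbox call conveying the QP state would go here. */

		clear_bit(MY_FLUSH_FLAG, &qp->flush_flag);
	}

	/* Safe from atomic context: only the first caller queues the work. */
	static void my_flush_cqe(struct my_qp *qp)
	{
		if (!test_and_set_bit(MY_FLUSH_FLAG, &qp->flush_flag))
			schedule_work(&qp->flush_work);
	}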