
RDMA/cxgb4: release hw resources on device removal
author Raju Rangoju <rajur@chelsio.com>
Mon, 23 Apr 2018 16:12:37 +0000 (21:42 +0530)
committer Doug Ledford <dledford@redhat.com>
Fri, 27 Apr 2018 17:52:31 +0000 (13:52 -0400)

The c4iw_rdev_close() logic was not releasing all the hw
resources (PBL and RQT memory) during the device removal
event (driver unload / system reboot). This can cause a
panic in gen_pool_destroy().
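
For illustration only, the ordering that triggers the panic:
gen_pool_destroy() requires that every allocation handed out by
gen_pool_alloc() has already been returned via gen_pool_free(). A minimal
sketch (pool, addr and size are placeholder names, not the driver's actual
variables):

	unsigned long addr;

	addr = gen_pool_alloc(pool, size);  /* e.g. RQT/PBL memory for a QP  */
	/* ... the matching gen_pool_free() is deferred to a workqueue ...   */
	gen_pool_destroy(pool);             /* allocation still outstanding: */
	                                    /* gen_pool_destroy() panics     */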

The module remove path now waits for all of the hw
resources to be released during the device removal event.
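
A simplified sketch of the pattern the patch uses for this (shown here for
the PBL pool; the RQT pool is handled identically): a kref counts the pool's
own reference plus every outstanding allocation, the kref release callback
destroys the pool and signals a completion, and c4iw_rdev_close() waits on
that completion before destroying the free workqueue.

	static void destroy_pblpool(struct kref *kref)
	{
		struct c4iw_rdev *rdev =
			container_of(kref, struct c4iw_rdev, pbl_kref);

		gen_pool_destroy(rdev->pbl_pool);  /* runs only on the last put  */
		complete(&rdev->pbl_compl);        /* unblocks c4iw_rdev_close() */
	}

	/* alloc path:  kref_get(&rdev->pbl_kref);                       */
	/* free path:   kref_put(&rdev->pbl_kref, destroy_pblpool);      */
	/* close path:  kref_put() drops the pool's own reference, then  */
	/*              wait_for_completion(&rdev->pbl_compl);           */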

Fixes: c12a67fe ("iw_cxgb4: free EQ queue memory on last deref")
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Cc: stable@vger.kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/resource.c

diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index feeb8ee..44161ca 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
        rdev->status_page->db_off = 0;
 
+       init_completion(&rdev->rqt_compl);
+       init_completion(&rdev->pbl_compl);
+       kref_init(&rdev->rqt_kref);
+       kref_init(&rdev->pbl_kref);
+
        return 0;
 err_free_status_page_and_wr_log:
        if (c4iw_wr_log && rdev->wr_log)
@@ -893,13 +898,15 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        c4iw_release_dev_ucontext(rdev, &rdev->uctx);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
+       wait_for_completion(&rdev->pbl_compl);
+       wait_for_completion(&rdev->rqt_compl);
        c4iw_ocqp_pool_destroy(rdev);
+       destroy_workqueue(rdev->free_workq);
        c4iw_destroy_resource(&rdev->resource);
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cc92900..a60def2 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -185,6 +185,10 @@ struct c4iw_rdev {
        struct wr_log_entry *wr_log;
        int wr_log_size;
        struct workqueue_struct *free_workq;
+       struct completion rqt_compl;
+       struct completion pbl_compl;
+       struct kref rqt_kref;
+       struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 3cf2599..0ef25ae 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
+               kref_get(&rdev->pbl_kref);
        } else
                rdev->stats.pbl.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+       struct c4iw_rdev *rdev;
+
+       rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+       gen_pool_destroy(rdev->pbl_pool);
+       complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
        pr_debug("addr 0x%x size %d\n", addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+       kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->pbl_pool);
+       kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
+               kref_get(&rdev->rqt_kref);
        } else
                rdev->stats.rqt.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+       struct c4iw_rdev *rdev;
+
+       rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+       gen_pool_destroy(rdev->rqt_pool);
+       complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
        pr_debug("addr 0x%x size %d\n", addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+       kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->rqt_pool);
+       kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*