scsi: lpfc: Fix hdwq sgl locks and irq handling
author    James Smart <jsmart2021@gmail.com>
          Sun, 22 Sep 2019 03:59:01 +0000 (20:59 -0700)
committer Martin K. Petersen <martin.petersen@oracle.com>
          Tue, 1 Oct 2019 02:07:10 +0000 (22:07 -0400)
Many of the sgl-per-hdwq paths lock with spin_lock_irq() and
spin_unlock_irq() and may unwittingly re-enable interrupts when they
should remain disabled. Hard deadlocks were seen around
lpfc_scsi_prep_cmnd().

Fix by converting the locks to spin_lock_irqsave()/
spin_unlock_irqrestore(), which preserve the caller's interrupt state.

Fixes: d79c9e9d4b3d ("scsi: lpfc: Support dynamic unbounded SGL lists on G7 hardware.")
Link: https://lore.kernel.org/r/20190922035906.10977-16-jsmart2021@gmail.com
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
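
For context, the change follows the standard kernel pattern for locks
that may be taken from contexts where interrupts are already disabled:
spin_unlock_irq() unconditionally re-enables interrupts on exit, while
the irqsave/irqrestore variants save and restore whatever interrupt
state the caller had. A minimal sketch of both patterns (struct
my_queue and the function names below are illustrative stand-ins, not
the driver's actual code):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct my_queue {                   /* illustrative stand-in */
            spinlock_t lock;
            struct list_head list;
    };

    /* Hazardous pattern: spin_unlock_irq() unconditionally re-enables
     * interrupts, even if the caller entered with them disabled.
     */
    static void take_buf_unsafe(struct my_queue *q)
    {
            spin_lock_irq(&q->lock);    /* disable interrupts */
            /* ... manipulate q->list ... */
            spin_unlock_irq(&q->lock);  /* interrupts now ON, regardless
                                         * of the caller's prior state */
    }

    /* Safe pattern used by the fix: save the interrupt state on entry
     * and restore exactly that state on exit.
     */
    static void take_buf_safe(struct my_queue *q)
    {
            unsigned long iflags;

            spin_lock_irqsave(&q->lock, iflags);   /* save state, disable */
            /* ... manipulate q->list ... */
            spin_unlock_irqrestore(&q->lock, iflags); /* restore saved state */
    }

In the lpfc case the lock in question is hdwq->hdwq_lock, and the saved
flags travel in a local iflags variable, as the diff below shows.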
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ff261c0..6d89dd3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20444,8 +20444,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
        struct sli4_hybrid_sgl *allocated_sgl = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the sgl_list */
@@ -20457,7 +20458,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
                }
        } else {
                /* allocate more */
-               spin_unlock_irq(&hdwq->hdwq_lock);
+               spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(smp_processor_id()));
                if (!tmp) {
@@ -20479,7 +20480,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
                        return NULL;
                }
 
-               spin_lock_irq(&hdwq->hdwq_lock);
+               spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
        }
 
@@ -20487,7 +20488,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
                                        struct sli4_hybrid_sgl,
                                        list_node);
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 
        return allocated_sgl;
 }
@@ -20511,8 +20512,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
        struct sli4_hybrid_sgl *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
                list_for_each_entry_safe(list_entry, tmp,
@@ -20525,7 +20527,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
                rc = -EINVAL;
        }
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
        return rc;
 }
 
@@ -20546,8 +20548,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
        struct list_head *buf_list = &hdwq->sgl_list;
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        /* Free sgl pool */
        list_for_each_entry_safe(list_entry, tmp,
@@ -20559,7 +20562,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
                kfree(list_entry);
        }
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }
 
 /**
@@ -20583,8 +20586,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
        struct fcp_cmd_rsp_buf *allocated_buf = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the list */
@@ -20597,7 +20601,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                }
        } else {
                /* allocate more */
-               spin_unlock_irq(&hdwq->hdwq_lock);
+               spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(smp_processor_id()));
                if (!tmp) {
@@ -20624,7 +20628,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
                                sizeof(struct fcp_cmnd));
 
-               spin_lock_irq(&hdwq->hdwq_lock);
+               spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
        }
 
@@ -20632,7 +20636,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                                        struct fcp_cmd_rsp_buf,
                                        list_node);
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 
        return allocated_buf;
 }
@@ -20657,8 +20661,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
        struct fcp_cmd_rsp_buf *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
                list_for_each_entry_safe(list_entry, tmp,
@@ -20671,7 +20676,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                rc = -EINVAL;
        }
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
        return rc;
 }
 
@@ -20692,8 +20697,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
+       unsigned long iflags;
 
-       spin_lock_irq(&hdwq->hdwq_lock);
+       spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
        /* Free cmd_rsp buf pool */
        list_for_each_entry_safe(list_entry, tmp,
@@ -20706,5 +20712,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                kfree(list_entry);
        }
 
-       spin_unlock_irq(&hdwq->hdwq_lock);
+       spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }