/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on
 *
 **/
-static inline void
+inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
int qidx;
+ struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
- lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
- lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- if (phba->sli4_hba.nvmels_cq)
- lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
+ sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
+ if (sli4_hba->nvmels_cq)
+ sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
LPFC_QUEUE_REARM);
- if (phba->sli4_hba.fcp_cq)
+ if (sli4_hba->fcp_cq)
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
- lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
+ sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
LPFC_QUEUE_REARM);
- if (phba->sli4_hba.nvme_cq)
+ if (sli4_hba->nvme_cq)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
- lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
+ sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
LPFC_QUEUE_REARM);
if (phba->cfg_fof)
- lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
- if (phba->sli4_hba.hba_eq)
+ if (sli4_hba->hba_eq)
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
- LPFC_QUEUE_REARM);
+ sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
+ LPFC_QUEUE_REARM);
if (phba->nvmet_support) {
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
- lpfc_sli4_cq_release(
- phba->sli4_hba.nvmet_cqset[qidx],
+ sli4_hba->sli4_cq_release(
+ sli4_hba->nvmet_cqset[qidx],
LPFC_QUEUE_REARM);
}
}
if (phba->cfg_fof)
- lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
}
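
For context, the hunks above route every doorbell operation through per-HBA
function pointers instead of direct calls. Below is a minimal sketch of how
those pointers might be wired up once at setup time; the wrapper name and
the assignment site are assumptions for illustration, while the pointer and
helper names come from the patch itself.

/*
 * Sketch only (not part of this patch): select the doorbell helpers
 * once, so the hot paths above can call through sli4_eq_clr_intr /
 * sli4_eq_release / sli4_cq_release without branching on interface
 * type each time.
 */
static void lpfc_sli4_setup_queue_ops(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;

	sli4_hba->sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
	sli4_hba->sli4_eq_release = lpfc_sli4_eq_release;
	sli4_hba->sli4_cq_release = lpfc_sli4_cq_release;
}
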
/**
 * lpfc_sli4_process_missed_mbox_completions - check for missed mailbox
 *	completions
 * @phba: Pointer to HBA context object.
 *
 * Locates the EQ that services the mailbox CQ, silences it, checks whether
 * a mailbox completion is pending, then clears and re-arms the EQ.
 * Returns true if a mailbox completion was pending.
 **/
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
-
+ struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
uint32_t eqidx;
struct lpfc_queue *fpeq = NULL;
struct lpfc_eqe *eqe;
	/* Find the EQ associated with the mailbox CQ */
- if (phba->sli4_hba.hba_eq)
+ if (sli4_hba->hba_eq)
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
- if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
- phba->sli4_hba.mbx_cq->assoc_qid) {
- fpeq = phba->sli4_hba.hba_eq[eqidx];
+ if (sli4_hba->hba_eq[eqidx]->queue_id ==
+ sli4_hba->mbx_cq->assoc_qid) {
+ fpeq = sli4_hba->hba_eq[eqidx];
break;
}
	if (!fpeq)
		return false;
/* Turn off interrupts from this EQ */
- lpfc_sli4_eq_clr_intr(fpeq);
+ sli4_hba->sli4_eq_clr_intr(fpeq);
	/* Check to see if an mbox completion is pending */
/* Always clear and re-arm the EQ */
- lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+ sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
	return mbox_pending;
}
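
For reference, a hypothetical call site for the routine above: a mailbox
timeout path can poll for a completion the ISR may have missed before
declaring the command dead. The wrapper below is illustrative only; only
lpfc_sli4_process_missed_mbox_completions() comes from the patch.

/* Hypothetical caller sketch, assuming the usual timeout flow. */
static bool lpfc_mbox_really_timed_out(struct lpfc_hba *phba)
{
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return false;	/* a completion was pending; not a timeout */
	return true;		/* genuinely timed out */
}
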
fpeq = phba->sli4_hba.hba_eq[idx];
/* Turn off interrupts from this EQ */
- lpfc_sli4_eq_clr_intr(fpeq);
+ phba->sli4_hba.sli4_eq_clr_intr(fpeq);
/*
	 * Process all the events on FCP EQ
	 */
}
/* Always clear and re-arm the EQ */
- lpfc_sli4_eq_release(fpeq,
+ phba->sli4_hba.sli4_eq_release(fpeq,
LPFC_QUEUE_REARM);
}
atomic_inc(&hba_eq_hdl->hba_eq_in_use);
"(x%x), type (%d)\n", cq->queue_id, cq->type);
	/* In any case, flush and re-arm the RCQ */
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
"queue fcpcqid=%d\n", cq->queue_id);
	/* In any case, flush and re-arm the CQ */
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
/* Clear and re-arm the EQ */
- lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
}
}
/* Always clear and re-arm the fast-path EQ */
- lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
return IRQ_HANDLED;
}
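
The fast-path handlers in the hunks above all share one shape: silence the
EQ, drain its entries, then clear and re-arm it through the function
pointer. Below is a condensed sketch of that pattern, assuming the existing
lpfc_sli4_eq_get() iterator and lpfc_sli4_hba_handle_eqe() per-entry
handler from lpfc_sli.c; the wrapper function itself is illustrative.

/* Sketch of the recurring EQ service pattern (the two helpers named
 * here are the existing lpfc_sli.c routines; this wrapper is
 * hypothetical).
 */
static irqreturn_t lpfc_sli4_eq_service_sketch(struct lpfc_hba *phba,
					       struct lpfc_queue *fpeq,
					       uint32_t qidx)
{
	struct lpfc_eqe *eqe;
	int ecount = 0;

	/* Turn off interrupts from this EQ while it is serviced. */
	phba->sli4_hba.sli4_eq_clr_intr(fpeq);

	/* Consume every posted event queue entry. */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
		ecount++;
	}

	/* Clear the consumed EQEs and re-arm so new events interrupt. */
	phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return ecount ? IRQ_HANDLED : IRQ_NONE;
}
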
if (lpfc_fcp_look_ahead) {
if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
- lpfc_sli4_eq_clr_intr(fpeq);
+ phba->sli4_hba.sli4_eq_clr_intr(fpeq);
else {
atomic_inc(&hba_eq_hdl->hba_eq_in_use);
return IRQ_NONE;
fpeq->EQ_max_eqe = ecount;
/* Always clear and re-arm the fast-path EQ */
- lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;