net: aquantia: link status irq handling
author Igor Russkikh <Igor.Russkikh@aquantia.com>
Mon, 29 Apr 2019 10:04:48 +0000 (10:04 +0000)
committer David S. Miller <davem@davemloft.net>
Wed, 1 May 2019 13:30:15 +0000 (09:30 -0400)
Here we define and request an extra interrupt line, assign the
link-state ISR handler to it, and restructure the aq_pci code a bit
to better support that.

We also remove the logic that used different timer intervals
depending on link state, since it is now unnecessary.

Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
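
For illustration, here is a minimal sketch of the mechanism, not the driver's
actual code; my_nic, my_link_threaded_isr and my_request_link_irq are made-up
placeholders. It shows how a dedicated threaded IRQ for link-state events can
be requested on the extra MSI-X vector:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Placeholder private struct; the real driver keeps the equivalent state
 * in struct aq_nic_s / struct aq_nic_cfg_s.
 */
struct my_nic {
	struct pci_dev *pdev;
	struct net_device *ndev;
	unsigned int link_irq_vec;	/* index of the extra (service) MSI-X vector */
};

static irqreturn_t my_link_threaded_isr(int irq, void *private)
{
	struct my_nic *nic = private;

	/* Threaded context: it is fine to sleep here while querying the PHY
	 * or firmware for the current link state, then update the carrier.
	 */
	netif_carrier_on(nic->ndev);	/* or netif_carrier_off() on link down */

	return IRQ_HANDLED;
}

static int my_request_link_irq(struct my_nic *nic)
{
	int irq = pci_irq_vector(nic->pdev, nic->link_irq_vec);

	/* No hard handler: the default primary handler just wakes the thread.
	 * IRQF_ONESHOT keeps the line masked until the thread completes.
	 */
	return request_threaded_irq(irq, NULL, my_link_threaded_isr,
				    IRQF_ONESHOT, "my-nic-link", nic);
}

The patch below does the equivalent in aq_nic_start(): it resolves the vector
with pci_irq_vector(self->pdev, cfg->link_irq_vec) and registers
aq_linkstate_threaded_isr via request_threaded_irq().
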
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c

drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 4851fc0..0251566 100644
@@ -14,6 +14,7 @@
 #include "aq_vec.h"
 #include "aq_hw.h"
 #include "aq_pci_func.h"
+#include "aq_main.h"
 
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -92,7 +93,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
        /*rss rings */
        cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
        cfg->vecs = min(cfg->vecs, num_online_cpus());
-       cfg->vecs = min(cfg->vecs, self->irqvecs);
+       if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+               cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
        /* cfg->vecs should be power of 2 for RSS */
        if (cfg->vecs >= 8U)
                cfg->vecs = 8U;
@@ -116,6 +118,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
                cfg->vecs = 1U;
        }
 
+       /* Check whether enough vectors have been allocated for the
+        * link status IRQ. If not, we will learn the link state from
+        * the slower service task instead.
+        */
+       if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
+               cfg->link_irq_vec = cfg->vecs;
+       else
+               cfg->link_irq_vec = 0;
+
        cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
        cfg->features = cfg->aq_hw_caps->hw_features;
 }
@@ -178,7 +189,6 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
 static void aq_nic_service_timer_cb(struct timer_list *t)
 {
        struct aq_nic_s *self = from_timer(self, t, service_timer);
-       int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
        int err = 0;
 
        if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
@@ -193,12 +203,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
 
        aq_nic_update_ndev_stats(self);
 
-       /* If no link - use faster timer rate to detect link up asap */
-       if (!netif_carrier_ok(self->ndev))
-               ctimer = max(ctimer / 2, 1);
-
 err_exit:
-       mod_timer(&self->service_timer, jiffies + ctimer);
+       mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
 }
 
 static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -359,13 +365,25 @@ int aq_nic_start(struct aq_nic_s *self)
        } else {
                for (i = 0U, aq_vec = self->aq_vec[0];
                        self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
-                       err = aq_pci_func_alloc_irq(self, i,
-                                                   self->ndev->name, aq_vec,
+                       err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+                                                   aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
                        if (err < 0)
                                goto err_exit;
                }
 
+               if (self->aq_nic_cfg.link_irq_vec) {
+                       int irqvec = pci_irq_vector(self->pdev,
+                                                  self->aq_nic_cfg.link_irq_vec);
+                       err = request_threaded_irq(irqvec, NULL,
+                                                  aq_linkstate_threaded_isr,
+                                                  IRQF_SHARED,
+                                                  self->ndev->name, self);
+                       if (err < 0)
+                               goto err_exit;
+                       self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
+               }
+
                err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                                     AQ_CFG_IRQ_MASK);
                if (err < 0)
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index eec49e6..4f373ea 100644
@@ -140,26 +140,27 @@ err_exit:
 }
 
 int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
-                         char *name, void *aq_vec, cpumask_t *affinity_mask)
+                         char *name, irq_handler_t irq_handler,
+                         void *irq_arg, cpumask_t *affinity_mask)
 {
        struct pci_dev *pdev = self->pdev;
        int err;
 
        if (pdev->msix_enabled || pdev->msi_enabled)
-               err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
-                                 name, aq_vec);
+               err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
+                                 name, irq_arg);
        else
                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
-                                 IRQF_SHARED, name, aq_vec);
+                                 IRQF_SHARED, name, irq_arg);
 
        if (err >= 0) {
                self->msix_entry_mask |= (1 << i);
-               self->aq_vec[i] = aq_vec;
 
-               if (pdev->msix_enabled)
+               if (pdev->msix_enabled && affinity_mask)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i),
                                              affinity_mask);
        }
+
        return err;
 }
 
@@ -167,16 +168,22 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
 {
        struct pci_dev *pdev = self->pdev;
        unsigned int i;
+       void *irq_data;
 
        for (i = 32U; i--;) {
                if (!((1U << i) & self->msix_entry_mask))
                        continue;
-               if (i >= AQ_CFG_VECS_MAX)
+               if (self->aq_nic_cfg.link_irq_vec &&
+                   i == self->aq_nic_cfg.link_irq_vec)
+                       irq_data = self;
+               else if (i < AQ_CFG_VECS_MAX)
+                       irq_data = self->aq_vec[i];
+               else
                        continue;
 
                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
-               free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+               free_irq(pci_irq_vector(pdev, i), irq_data);
                self->msix_entry_mask &= ~(1U << i);
        }
 }
@@ -269,6 +276,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
        numvecs = min((u8)AQ_CFG_VECS_DEF,
                      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
        numvecs = min(numvecs, num_online_cpus());
+       numvecs += AQ_HW_SERVICE_IRQS;
        /*enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
        err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index 799c5e0..670f9a9 100644
@@ -24,8 +24,8 @@ struct aq_board_revision_s {
 
 int aq_pci_func_init(struct pci_dev *pdev);
 int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
-                         char *name, void *aq_vec,
-                         cpumask_t *affinity_mask);
+                         char *name, irq_handler_t irq_handler,
+                         void *irq_arg, cpumask_t *affinity_mask);
 void aq_pci_func_free_irqs(struct aq_nic_s *self);
 unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
 
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 7e95804..d54566b 100644
@@ -443,6 +443,11 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
                                   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
                                   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
 
+       /* Enable link interrupt */
+       if (aq_nic_cfg->link_irq_vec)
+               hw_atl_reg_gen_irq_map_set(self, BIT(7) |
+                                          aq_nic_cfg->link_irq_vec, 3U);
+
        hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
 
 err_exit: