
liquidio: avoided acquiring post_lock for data only queues
author    Intiyaz Basha <intiyaz.basha@cavium.com>
          Mon, 6 Aug 2018 20:09:40 +0000 (13:09 -0700)
committer David S. Miller <davem@davemloft.net>
          Tue, 7 Aug 2018 19:40:43 +0000 (12:40 -0700)
All control commands (soft commands) go through only Queue 0
(the control-and-data queue). So only queue 0 needs post_lock;
the other queues are data-only queues and do not need post_lock.

Added a flag to indicate whether the queue can be used for soft commands.

If this flag is set, post_lock must be acquired before posting
a command to the queue.
If this flag is clear, post_lock is invalid for the queue.
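
For readers skimming the diff, the pattern is easy to see in isolation. Below is a minimal userspace C sketch of the same idea, not the driver code itself: pthread mutexes stand in for the kernel spinlock, and the queue, command, and function names (init_queue, send_command, send_soft_command) are simplified stand-ins that only mirror the driver's real octeon_* structures and functions.

/* Minimal userspace sketch of the patch's locking pattern.
 * pthread_mutex_t stands in for the kernel spinlock; the
 * types and helpers are simplified stand-ins, not the real
 * liquidio driver code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct instr_queue {
        bool allow_soft_cmds;      /* true only for queue 0 */
        pthread_mutex_t post_lock; /* valid only when allow_soft_cmds */
};

static void init_queue(struct instr_queue *iq, int iq_no)
{
        /* Only queue 0 carries soft (control) commands, so only
         * queue 0 gets a post_lock; data-only queues skip it.
         */
        iq->allow_soft_cmds = (iq_no == 0);
        if (iq->allow_soft_cmds)
                pthread_mutex_init(&iq->post_lock, NULL);
}

static void send_command(struct instr_queue *iq, int cmd)
{
        /* Data-only queues post without taking the lock. */
        if (iq->allow_soft_cmds)
                pthread_mutex_lock(&iq->post_lock);

        printf("posted cmd %d\n", cmd); /* __post_command2() in the driver */

        if (iq->allow_soft_cmds)
                pthread_mutex_unlock(&iq->post_lock);
}

static int send_soft_command(struct instr_queue *iq, int cmd)
{
        /* Soft commands are rejected outright on data-only queues. */
        if (!iq->allow_soft_cmds) {
                fprintf(stderr, "soft commands not allowed on this queue\n");
                return -1;
        }
        send_command(iq, cmd);
        return 0;
}

int main(void)
{
        struct instr_queue q0, q1;

        init_queue(&q0, 0);
        init_queue(&q1, 1);

        send_soft_command(&q0, 1); /* allowed: queue 0 */
        send_soft_command(&q1, 2); /* rejected: data-only queue */
        send_command(&q1, 3);      /* data path, lock-free */
        return 0;
}

Built with cc -pthread, the sketch posts one soft command on queue 0 under the lock, rejects a soft command on queue 1, and posts plain data on queue 1 without ever touching a lock, which is exactly the fast path the patch opens up.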

Signed-off-by: Intiyaz Basha <intiyaz.basha@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/cavium/liquidio/octeon_iq.h
drivers/net/ethernet/cavium/liquidio/request_manager.c

diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5fed7b6..2327062 100644
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
        /** A spinlock to protect while posting on the ring.  */
        spinlock_t post_lock;
 
+       /** This flag indicates if the queue can be used for soft commands.
+        *  If this flag is set, post_lock must be acquired before posting
+        *  a command to the queue.
+        *  If this flag is clear, post_lock is invalid for the queue.
+        *  All control commands (soft commands) will go through only Queue 0
+        *  (control and data queue). So only queue 0 needs post_lock;
+        *  other queues are data-only queues and do not need post_lock.
+        */
+       bool allow_soft_cmds;
+
        u32 pkt_in_done;
 
        /** A spinlock to protect access to the input ring.*/
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index d5d9e47..8f746e1 100644
@@ -126,7 +126,12 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 
        /* Initialize the spinlock for this instruction queue */
        spin_lock_init(&iq->lock);
-       spin_lock_init(&iq->post_lock);
+       if (iq_no == 0) {
+               iq->allow_soft_cmds = true;
+               spin_lock_init(&iq->post_lock);
+       } else {
+               iq->allow_soft_cmds = false;
+       }
 
        spin_lock_init(&iq->iq_flush_running_lock);
 
@@ -566,7 +571,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
        /* Get the lock and prevent other tasks and tx interrupt handler from
         * running.
         */
-       spin_lock_bh(&iq->post_lock);
+       if (iq->allow_soft_cmds)
+               spin_lock_bh(&iq->post_lock);
 
        st = __post_command2(iq, cmd);
 
@@ -583,7 +589,8 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
        }
 
-       spin_unlock_bh(&iq->post_lock);
+       if (iq->allow_soft_cmds)
+               spin_unlock_bh(&iq->post_lock);
 
        /* This is only done here to expedite packets being flushed
         * for cases where there are no IQ completion interrupts.
@@ -702,11 +709,20 @@ octeon_prepare_soft_command(struct octeon_device *oct,
 int octeon_send_soft_command(struct octeon_device *oct,
                             struct octeon_soft_command *sc)
 {
+       struct octeon_instr_queue *iq;
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        u32 len;
 
+       iq = oct->instr_queue[sc->iq_no];
+       if (!iq->allow_soft_cmds) {
+               dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+                       sc->iq_no);
+               INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+               return IQ_SEND_FAILED;
+       }
+
        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
                ih3 =  (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                if (ih3->dlengsz) {