
iwlwifi: mvm: get rid of tx_path_lock
author Sara Sharon <sara.sharon@intel.com>
Thu, 25 Oct 2018 17:11:51 +0000 (20:11 +0300)
committer Luca Coelho <luciano.coelho@intel.com>
Tue, 29 Jan 2019 14:10:30 +0000 (16:10 +0200)
The TX path lock was introduced in order to prevent out-of-order
invocations of the TX path.

This can happen in the following flow:

TX path invoked from net dev
  Packet #1 dequeued
    TX path invoked from RX path
      Packet #2 dequeued
      Packet #2 TXed
  Packet #1 TXed   <-- out of order

However, we don't really need a lock: if the TX path has already
been invoked from one place, the other invocations can simply abort
instead of waiting for the first one to finish, only to then
discover that the queue is (likely) empty or stopped.

Replace the lock with an atomic variable that tracks TX ownership.
This simplifies the locking dependencies between the RX and TX paths,
and should improve performance.

Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c

drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index e3da699..dd0761e 100644
@@ -878,25 +878,45 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
        struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
        struct sk_buff *skb = NULL;
 
-       spin_lock(&mvmtxq->tx_path_lock);
+       /*
+        * No need for threads to be pending here, they can leave the first
+        * taker all the work.
+        *
+        * mvmtxq->tx_request logic:
+        *
+        * If 0, no one is currently TXing, set to 1 to indicate current thread
+        * will now start TX and other threads should quit.
+        *
+        * If 1, another thread is currently TXing, set to 2 to indicate to
+        * that thread that there was another request. Since that request may
+        * have raced with the check whether the queue is empty, the TXing
+        * thread should check the queue's status one more time before leaving.
+        * This check is done in order to not leave any TX hanging in the queue
+        * until the next TX invocation (which may not even happen).
+        *
+        * If 2, another thread is currently TXing, and it will already double
+        * check the queue, so do nothing.
+        */
+       if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
+               return;
 
        rcu_read_lock();
-       while (likely(!mvmtxq->stopped &&
-                     (mvm->trans->system_pm_mode ==
-                      IWL_PLAT_PM_MODE_DISABLED))) {
-               skb = ieee80211_tx_dequeue(hw, txq);
+       do {
+               while (likely(!mvmtxq->stopped &&
+                             (mvm->trans->system_pm_mode ==
+                              IWL_PLAT_PM_MODE_DISABLED))) {
+                       skb = ieee80211_tx_dequeue(hw, txq);
 
-               if (!skb)
-                       break;
+                       if (!skb)
+                               break;
 
-               if (!txq->sta)
-                       iwl_mvm_tx_skb_non_sta(mvm, skb);
-               else
-                       iwl_mvm_tx_skb(mvm, skb, txq->sta);
-       }
+                       if (!txq->sta)
+                               iwl_mvm_tx_skb_non_sta(mvm, skb);
+                       else
+                               iwl_mvm_tx_skb(mvm, skb, txq->sta);
+               }
+       } while (atomic_dec_return(&mvmtxq->tx_request));
        rcu_read_unlock();
-
-       spin_unlock(&mvmtxq->tx_path_lock);
 }
 
 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d326843..a289b94 100644
@@ -781,8 +781,7 @@ struct iwl_mvm_geo_profile {
 struct iwl_mvm_txq {
        struct list_head list;
        u16 txq_id;
-       /* Protects TX path invocation from two places */
-       spinlock_t tx_path_lock;
+       atomic_t tx_request;
        bool stopped;
 };
 
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5f42897..c5a0147 100644
@@ -1403,9 +1403,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
                iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
                list_del_init(&mvmtxq->list);
-               local_bh_disable();
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
-               local_bh_enable();
        }
 
        mutex_unlock(&mvm->mutex);
@@ -1646,7 +1644,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                INIT_LIST_HEAD(&mvmtxq->list);
-               spin_lock_init(&mvmtxq->tx_path_lock);
+               atomic_set(&mvmtxq->tx_request, 0);
        }
 
        mvm_sta->agg_tids = 0;