OSDN Git Service

Merge tag 'iwlwifi-next-for-kalle-2015-02-03' of https://git.kernel.org/pub/scm/linux...
authorKalle Valo <kvalo@codeaurora.org>
Fri, 6 Feb 2015 06:57:37 +0000 (08:57 +0200)
committerKalle Valo <kvalo@codeaurora.org>
Fri, 6 Feb 2015 06:57:37 +0000 (08:57 +0200)
* Add support for beamforming
* Enable stuck queue detection for iwlmvm
* A few fixes for EBS scan
* Fixes for various failure paths
* Improvements for TDLS Offchannel

26 files changed:
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-scd.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/tdls.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c

index de43dd7..c4d6dd7 100644 (file)
@@ -1228,11 +1228,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
        trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
-       if (!iwlwifi_mod_params.wd_disable)
-               trans_cfg.queue_watchdog_timeout =
-                       priv->cfg->base_params->wd_timeout;
-       else
-               trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
+       trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
+
        trans_cfg.command_names = iwl_dvm_cmd_strings;
        trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
 
index d1ce3ce..1e40a12 100644 (file)
@@ -715,7 +715,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
        fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
 
        iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
-                            buf_size, ssn);
+                            buf_size, ssn, 0);
 
        /*
         * If the limit is 0, then it wasn't initialised yet,
index d5cee15..4dbef7e 100644 (file)
@@ -267,7 +267,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
        for (i = 0; i < n_queues; i++)
                if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
                        iwl_trans_ac_txq_enable(priv->trans, i,
-                                               queue_to_txf[i]);
+                                               queue_to_txf[i], 0);
 
        priv->passive_no_rx = false;
        priv->transport_queue_stop = 0;
index 445bff6..4b190d9 100644 (file)
@@ -126,7 +126,7 @@ enum iwl_led_mode {
 
 /* TX queue watchdog timeouts in mSecs */
 #define IWL_WATCHDOG_DISABLED  0
-#define IWL_DEF_WD_TIMEOUT     2000
+#define IWL_DEF_WD_TIMEOUT     2500
 #define IWL_LONG_WD_TIMEOUT    10000
 #define IWL_MAX_WD_TIMEOUT     120000
 
index e7c0df6..996e7f1 100644 (file)
@@ -1367,7 +1367,6 @@ struct iwl_mod_params iwlwifi_mod_params = {
        .restart_fw = true,
        .bt_coex_active = true,
        .power_level = IWL_POWER_INDEX_1,
-       .wd_disable = true,
        .d0i3_disable = true,
 #ifndef CONFIG_IWLWIFI_UAPSD
        .uapsd_disable = true,
@@ -1478,10 +1477,6 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
 MODULE_PARM_DESC(antenna_coupling,
                 "specify antenna coupling in dB (default: 0 dB)");
 
-module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
-MODULE_PARM_DESC(wd_disable,
-               "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
-
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
index e4f5898..016d913 100644 (file)
@@ -270,6 +270,7 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *     tx power value into TPC Report action frame and Link Measurement Report
@@ -288,6 +289,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = BIT(1),
        IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = BIT(2),
+       IWL_UCODE_TLV_CAPA_BEAMFORMER                   = BIT(3),
        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = BIT(6),
        IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = BIT(8),
        IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = BIT(9),
index 2a8cf4b..e8eabd2 100644 (file)
@@ -96,7 +96,6 @@ enum iwl_disable_11n {
  *     use IWL_[DIS,EN]ABLE_HT_* constants
  * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
- * @wd_disable: disable stuck queue check, default = 1
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
  * @power_save: enable power save, default = false
@@ -111,7 +110,6 @@ struct iwl_mod_params {
        unsigned int disable_11n;
        int amsdu_size_8K;
        bool restart_fw;
-       int  wd_disable;
        bool bt_coex_active;
        int led_mode;
        bool power_save;
index b21fcf0..6221e4d 100644 (file)
 #define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK                (0x0000007F)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS     (16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK     (0x007F0000)
+#define SCD_GP_CTRL_ENABLE_31_QUEUES           BIT(0)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND    (SCD_MEM_LOWER_BOUND + 0x600)
 #define SCD_CHAINEXT_EN                (SCD_BASE + 0x244)
 #define SCD_AGGR_SEL           (SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK     (SCD_BASE + 0x108)
+#define SCD_GP_CTRL            (SCD_BASE + 0x1a8)
 #define SCD_EN_CTRL            (SCD_BASE + 0x254)
 
-static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
-{
-       if (chnl < 20)
-               return SCD_BASE + 0x18 + chnl * 4;
-       WARN_ON_ONCE(chnl >= 32);
-       return SCD_BASE + 0x284 + (chnl - 20) * 4;
-}
-
-static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
-{
-       if (chnl < 20)
-               return SCD_BASE + 0x68 + chnl * 4;
-       WARN_ON_ONCE(chnl >= 32);
-       return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
-}
-
-static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
-{
-       if (chnl < 20)
-               return SCD_BASE + 0x10c + chnl * 4;
-       WARN_ON_ONCE(chnl >= 32);
-       return SCD_BASE + 0x384 + (chnl - 20) * 4;
-}
-
 /*********************** END TX SCHEDULER *************************************/
 
 /* Oscillator clock */
index 6c622b2..f2353eb 100644 (file)
 #include "iwl-prph.h"
 
 
-static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
-                                           u16 txq_id)
-{
-       iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
-                      (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-                      (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
 static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans,
                                         u16 txq_id)
 {
@@ -115,4 +107,37 @@ static inline void iwl_scd_enable_set_active(struct iwl_trans *trans,
 {
        iwl_write_prph(trans, SCD_EN_CTRL, value);
 }
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+       if (chnl < 20)
+               return SCD_BASE + 0x18 + chnl * 4;
+       WARN_ON_ONCE(chnl >= 32);
+       return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+       if (chnl < 20)
+               return SCD_BASE + 0x68 + chnl * 4;
+       WARN_ON_ONCE(chnl >= 32);
+       return SCD_BASE + 0x2B4 + chnl * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+       if (chnl < 20)
+               return SCD_BASE + 0x10c + chnl * 4;
+       WARN_ON_ONCE(chnl >= 32);
+       return SCD_BASE + 0x334 + chnl * 4;
+}
+
+static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
+                                           u16 txq_id)
+{
+       iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+                      (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+                      (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
 #endif
index 84d8477..a96bd8d 100644 (file)
@@ -368,6 +368,7 @@ enum iwl_trans_status {
  * @cmd_queue: the index of the command queue.
  *     Must be set before start_fw.
  * @cmd_fifo: the fifo for host commands
+ * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
  * @no_reclaim_cmds: Some devices erroneously don't set the
  *     SEQ_RX_FRAME bit on some notifications, this is the
  *     list of such notifications to filter. Max length is
@@ -378,8 +379,6 @@ enum iwl_trans_status {
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *     in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
- * @queue_watchdog_timeout: time (in ms) after which queues
- *     are considered stuck and will trigger device restart
  * @command_names: array of command names, must be 256 entries
  *     (one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -390,13 +389,13 @@ struct iwl_trans_config {
 
        u8 cmd_queue;
        u8 cmd_fifo;
+       unsigned int cmd_q_wdg_timeout;
        const u8 *no_reclaim_cmds;
        unsigned int n_no_reclaim_cmds;
 
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
-       unsigned int queue_watchdog_timeout;
        const char *const *command_names;
 
        u32 sdio_adma_addr;
@@ -511,7 +510,8 @@ struct iwl_trans_ops {
                        struct sk_buff_head *skbs);
 
        void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
-                          const struct iwl_trans_txq_scd_cfg *cfg);
+                          const struct iwl_trans_txq_scd_cfg *cfg,
+                          unsigned int queue_wdg_timeout);
        void (*txq_disable)(struct iwl_trans *trans, int queue,
                            bool configure_scd);
 
@@ -829,19 +829,21 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
 
 static inline void
 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
-                        const struct iwl_trans_txq_scd_cfg *cfg)
+                        const struct iwl_trans_txq_scd_cfg *cfg,
+                        unsigned int queue_wdg_timeout)
 {
        might_sleep();
 
        if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
-       trans->ops->txq_enable(trans, queue, ssn, cfg);
+       trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
 }
 
 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
                                        int fifo, int sta_id, int tid,
-                                       int frame_limit, u16 ssn)
+                                       int frame_limit, u16 ssn,
+                                       unsigned int queue_wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -851,11 +853,12 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
                .aggregate = sta_id >= 0,
        };
 
-       iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg);
+       iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
 }
 
-static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
-                                          int fifo)
+static inline
+void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
+                            unsigned int queue_wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -865,16 +868,16 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
                .aggregate = false,
        };
 
-       iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg);
+       iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
 }
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
-                                               u32 txq_bm)
+                                               u32 txqs)
 {
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
                IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 
-       return trans->ops->wait_tx_queue_empty(trans, txq_bm);
+       return trans->ops->wait_tx_queue_empty(trans, txqs);
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
index d91c46b..beba375 100644 (file)
@@ -99,7 +99,7 @@
 #define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS  30
 #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL       0
 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL       0
-#define IWL_MVM_QUOTA_THRESHOLD                        8
+#define IWL_MVM_QUOTA_THRESHOLD                        4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO            0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
index 6a2a6b0..0f1ea80 100644 (file)
@@ -308,16 +308,41 @@ enum {
 #define LQ_FLAG_DYNAMIC_BW_POS          6
 #define LQ_FLAG_DYNAMIC_BW_MSK          (1 << LQ_FLAG_DYNAMIC_BW_POS)
 
-/* Single Stream Parameters
- * SS_STBC/BFER_ALLOWED - Controls whether STBC or Beamformer (BFER) is allowed
- * ucode will make a smart decision between SISO/STBC/BFER
- * SS_PARAMS_VALID - if not set ignore the ss_params field.
+/* Single Stream Tx Parameters (lq_cmd->ss_params)
+ * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
+ * used for single stream Tx.
  */
-enum {
-       RS_SS_STBC_ALLOWED = BIT(0),
-       RS_SS_BFER_ALLOWED = BIT(1),
-       RS_SS_PARAMS_VALID = BIT(31),
-};
+
+/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
+ * (0) - No STBC allowed
+ * (1) - 2x1 STBC allowed (HT/VHT)
+ * (2) - 4x2 STBC allowed (HT/VHT)
+ * (3) - 3x2 STBC allowed (HT only)
+ * All our chips are at most 2 antennas so only (1) is valid for now.
+ */
+#define LQ_SS_STBC_ALLOWED_POS          0
+#define LQ_SS_STBC_ALLOWED_MSK         (3 << LQ_SS_STBC_ALLOWED_POS)
+
+/* 2x1 STBC is allowed */
+#define LQ_SS_STBC_1SS_ALLOWED         (1 << LQ_SS_STBC_ALLOWED_POS)
+
+/* Bit 2: Beamformer (VHT only) is allowed */
+#define LQ_SS_BFER_ALLOWED_POS         2
+#define LQ_SS_BFER_ALLOWED             (1 << LQ_SS_BFER_ALLOWED_POS)
+
+/* Bit 3: Force BFER or STBC for testing
+ * If this is set:
+ * If BFER is allowed then force the ucode to choose BFER else
+ * If STBC is allowed then force the ucode to choose STBC over SISO
+ */
+#define LQ_SS_FORCE_POS                        3
+#define LQ_SS_FORCE                    (1 << LQ_SS_FORCE_POS)
+
+/* Bit 31: ss_params field is valid. Used for FW backward compatibility
+ * with other drivers which don't support the ss_params API yet
+ */
+#define LQ_SS_PARAMS_VALID_POS         31
+#define LQ_SS_PARAMS_VALID             (1 << LQ_SS_PARAMS_VALID_POS)
 
 /**
  * struct iwl_lq_cmd - link quality command
index a322a5e..ca38e98 100644 (file)
@@ -575,7 +575,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
 
-       iwl_mvm_get_shared_mem_conf(mvm);
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
+               iwl_mvm_get_shared_mem_conf(mvm);
 
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
index 8bf78fa..7bdc622 100644 (file)
@@ -462,6 +462,9 @@ exit_fail:
 
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+                                       mvm->cfg->base_params->wd_timeout :
+                                       IWL_WATCHDOG_DISABLED;
        u32 ac;
        int ret;
 
@@ -474,16 +477,17 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        switch (vif->type) {
        case NL80211_IFTYPE_P2P_DEVICE:
                iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-                                     IWL_MVM_TX_FIFO_VO);
+                                     IWL_MVM_TX_FIFO_VO, wdg_timeout);
                break;
        case NL80211_IFTYPE_AP:
                iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
-                                     IWL_MVM_TX_FIFO_MCAST);
+                                     IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
                /* fall through */
        default:
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
-                                             iwl_mvm_ac_to_tx_fifo[ac]);
+                                             iwl_mvm_ac_to_tx_fifo[ac],
+                                             wdg_timeout);
                break;
        }
 
index cef6f33..1ff7ec0 100644 (file)
@@ -401,10 +401,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
-       if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
+       if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
+               if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+                       hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+                               IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+       }
+
        hw->wiphy->hw_version = mvm->trans->hw_id;
 
        if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
@@ -707,9 +712,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        mvmvif->uploaded = false;
        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 
-       /* does this make sense at all? */
-       mvmvif->color++;
-
        spin_lock_bh(&mvm->time_event_lock);
        iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
        spin_unlock_bh(&mvm->time_event_lock);
@@ -1353,7 +1355,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 
        ret = iwl_mvm_power_update_mac(mvm);
        if (ret)
-               goto out_release;
+               goto out_remove_mac;
 
        /* beacon filtering */
        ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
index 979ac23..6c69d05 100644 (file)
@@ -119,11 +119,13 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
  *     We will register to mac80211 to have testmode working. The NIC must not
  *     be up'ed after the INIT fw asserted. This is useful to be able to use
  *     proprietary tools over testmode to debug the INIT fw.
+ * @tfd_q_hang_detect: enabled the detection of hung transmit queues
  * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
  *     Save)-2(default), LP(Low Power)-3
  */
 struct iwl_mvm_mod_params {
        bool init_dbg;
+       bool tfd_q_hang_detect;
        int power_scheme;
 };
 extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -532,6 +534,7 @@ enum {
 enum iwl_mvm_tdls_cs_state {
        IWL_MVM_TDLS_SW_IDLE = 0,
        IWL_MVM_TDLS_SW_REQ_SENT,
+       IWL_MVM_TDLS_SW_RESP_RCVD,
        IWL_MVM_TDLS_SW_REQ_RCVD,
        IWL_MVM_TDLS_SW_ACTIVE,
 };
@@ -797,6 +800,9 @@ struct iwl_mvm {
                        struct cfg80211_chan_def chandef;
                        struct sk_buff *skb; /* ch sw template */
                        u32 ch_sw_tm_ie;
+
+                       /* timestamp of last ch-sw request sent (GP2 time) */
+                       u32 sent_timestamp;
                } peer;
        } tdls_cs;
 
@@ -874,7 +880,7 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
 
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
-       return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_API_SCD_CFG;
+       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
 }
 
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -1312,11 +1318,13 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 
 /* hw scheduler queue config */
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-                       const struct iwl_trans_txq_scd_cfg *cfg);
+                       const struct iwl_trans_txq_scd_cfg *cfg,
+                       unsigned int wdg_timeout);
 void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
 
-static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
-                                        u8 fifo)
+static inline
+void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
+                          u8 fifo, unsigned int wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -1325,12 +1333,13 @@ static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
                .frame_limit = IWL_FRAME_LIMIT,
        };
 
-       iwl_mvm_enable_txq(mvm, queue, 0, &cfg);
+       iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
 }
 
 static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
                                          int fifo, int sta_id, int tid,
-                                         int frame_limit, u16 ssn)
+                                         int frame_limit, u16 ssn,
+                                         unsigned int wdg_timeout)
 {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
@@ -1340,7 +1349,7 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
                .aggregate = true,
        };
 
-       iwl_mvm_enable_txq(mvm, queue, ssn, &cfg);
+       iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
 }
 
 /* Assoc status */
index 8bf8c2a..2dffc36 100644 (file)
@@ -93,6 +93,7 @@ static const struct iwl_op_mode_ops iwl_mvm_ops;
 
 struct iwl_mvm_mod_params iwlmvm_mod_params = {
        .power_scheme = IWL_POWER_SCHEME_BPS,
+       .tfd_q_hang_detect = true
        /* rest of fields are 0 by default */
 };
 
@@ -102,6 +103,10 @@ MODULE_PARM_DESC(init_dbg,
 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
 MODULE_PARM_DESC(power_scheme,
                 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
+                  bool, S_IRUGO);
+MODULE_PARM_DESC(tfd_q_hang_detect,
+                "TFD queues hang detection (default: true)");
 
 /*
  * module init and exit functions
@@ -473,11 +478,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
                trans_cfg.bc_table_dword = true;
 
-       if (!iwlwifi_mod_params.wd_disable)
-               trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
-       else
-               trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
-
        trans_cfg.command_names = iwl_mvm_cmd_strings;
 
        trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
@@ -486,6 +486,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
 
+       /* Set a short watchdog for the command queue */
+       trans_cfg.cmd_q_wdg_timeout =
+               iwlmvm_mod_params.tfd_q_hang_detect ? IWL_DEF_WD_TIMEOUT :
+                                                     IWL_WATCHDOG_DISABLED;
+
        snprintf(mvm->hw->wiphy->fw_version,
                 sizeof(mvm->hw->wiphy->fw_version),
                 "%s", fw->fw_version);
@@ -563,6 +568,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (!mvm->scan_cmd)
                goto out_free;
 
+       /* Set EBS as successful as long as not stated otherwise by the FW. */
+       mvm->last_ebs_successful = true;
+
        err = iwl_mvm_mac_setup_register(mvm);
        if (err)
                goto out_free;
@@ -870,7 +878,10 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * If WoWLAN fw asserted, don't restart either, mac80211
         * can't recover this since we're already half suspended.
         */
-       if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+       if (!mvm->restart_fw && fw_error) {
+               schedule_work(&mvm->fw_error_dump_wk);
+       } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+                                   &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
                IWL_ERR(mvm,
@@ -894,16 +905,13 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
                reprobe->dev = mvm->trans->dev;
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
-       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
-                  (!fw_error || mvm->restart_fw)) {
+       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
                /* don't let the transport/FW power down */
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
                if (fw_error && mvm->restart_fw > 0)
                        mvm->restart_fw--;
                ieee80211_restart_hw(mvm->hw);
-       } else if (fw_error) {
-               schedule_work(&mvm->fw_error_dump_wk);
        }
 }
 
index 9f32f2d..194bd1f 100644 (file)
@@ -39,6 +39,7 @@
 #include "sta.h"
 #include "iwl-op-mode.h"
 #include "mvm.h"
+#include "debugfs.h"
 
 #define RS_NAME "iwl-mvm-rs"
 
@@ -1805,7 +1806,7 @@ static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
         * supports STBC of at least 1*SS
         */
-       if (!lq_sta->stbc)
+       if (!lq_sta->stbc_capable)
                return false;
 
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -2626,7 +2627,7 @@ static void rs_ht_init(struct iwl_mvm *mvm,
        if (mvm->cfg->ht_params->stbc &&
            (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
            (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
-               lq_sta->stbc = true;
+               lq_sta->stbc_capable = true;
 
        lq_sta->is_vht = false;
 }
@@ -2645,7 +2646,12 @@ static void rs_vht_init(struct iwl_mvm *mvm,
        if (mvm->cfg->ht_params->stbc &&
            (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
            (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
-               lq_sta->stbc = true;
+               lq_sta->stbc_capable = true;
+
+       if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+           (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+           (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+               lq_sta->bfer_capable = true;
 
        lq_sta->is_vht = true;
 }
@@ -2778,11 +2784,12 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
 
        IWL_DEBUG_RATE(mvm,
-                      "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d\n",
+                      "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
                       lq_sta->active_legacy_rate,
                       lq_sta->active_siso_rate,
                       lq_sta->active_mimo2_rate,
-                      lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc);
+                      lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
+                      lq_sta->bfer_capable);
        IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
                       lq_sta->max_legacy_rate_idx,
                       lq_sta->max_siso_rate_idx,
@@ -2916,23 +2923,15 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        u8 valid_tx_ant = 0;
        struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
        bool toggle_ant = false;
-       bool stbc_allowed = false;
 
        memcpy(&rate, initial_rate, sizeof(rate));
 
        valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
 
-       stbc_allowed = rs_stbc_allow(mvm, sta, lq_sta);
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) {
-               u32 ss_params = RS_SS_PARAMS_VALID;
-
-               if (stbc_allowed)
-                       ss_params |= RS_SS_STBC_ALLOWED;
-               lq_cmd->ss_params = cpu_to_le32(ss_params);
-       } else {
-               /* TODO: remove old API when min FW API hits 14 */
-               rate.stbc = stbc_allowed;
-       }
+       /* TODO: remove old API when min FW API hits 14 */
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+           rs_stbc_allow(mvm, sta, lq_sta))
+               rate.stbc = true;
 
        if (is_siso(&rate)) {
                num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
@@ -2980,6 +2979,142 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 
 }
 
+struct rs_bfer_active_iter_data {
+       struct ieee80211_sta *exclude_sta;
+       struct iwl_mvm_sta *bfer_mvmsta;
+};
+
+static void rs_bfer_active_iter(void *_data,
+                               struct ieee80211_sta *sta)
+{
+       struct rs_bfer_active_iter_data *data = _data;
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+       u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
+
+       if (sta == data->exclude_sta)
+               return;
+
+       /* The current sta has BFER allowed */
+       if (ss_params & LQ_SS_BFER_ALLOWED) {
+               WARN_ON_ONCE(data->bfer_mvmsta != NULL);
+
+               data->bfer_mvmsta = mvmsta;
+       }
+}
+
+static int rs_bfer_priority(struct iwl_mvm_sta *sta)
+{
+       int prio = -1;
+       enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
+
+       switch (viftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               prio = 3;
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               prio = 2;
+               break;
+       case NL80211_IFTYPE_STATION:
+               prio = 1;
+               break;
+       default:
+               WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
+               prio = -1;
+       }
+
+       return prio;
+}
+
+/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
+static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
+                               struct iwl_mvm_sta *sta2)
+{
+       int prio1 = rs_bfer_priority(sta1);
+       int prio2 = rs_bfer_priority(sta2);
+
+       if (prio1 > prio2)
+               return 1;
+       if (prio1 < prio2)
+               return -1;
+       return 0;
+}
+
+static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
+                               struct ieee80211_sta *sta,
+                               struct iwl_lq_sta *lq_sta,
+                               const struct rs_rate *initial_rate)
+{
+       struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct rs_bfer_active_iter_data data = {
+               .exclude_sta = sta,
+               .bfer_mvmsta = NULL,
+       };
+       struct iwl_mvm_sta *bfer_mvmsta = NULL;
+       u32 ss_params = LQ_SS_PARAMS_VALID;
+
+       if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+               goto out;
+
+       /* Check if forcing the decision is configured.
+        * Note that SISO is forced by not allowing STBC or BFER
+        */
+       if (lq_sta->ss_force == RS_SS_FORCE_STBC)
+               ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
+       else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
+               ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
+
+       if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
+               IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
+                              lq_sta->ss_force);
+               goto out;
+       }
+
+       if (lq_sta->stbc_capable)
+               ss_params |= LQ_SS_STBC_1SS_ALLOWED;
+
+       if (!lq_sta->bfer_capable)
+               goto out;
+
+       ieee80211_iterate_stations_atomic(mvm->hw,
+                                         rs_bfer_active_iter,
+                                         &data);
+       bfer_mvmsta = data.bfer_mvmsta;
+
+       /* This code is safe as it doesn't run concurrently for different
+        * stations. This is guaranteed by the fact that calls to
+        * ieee80211_tx_status wouldn't run concurrently for a single HW.
+        */
+       if (!bfer_mvmsta) {
+               IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
+
+               ss_params |= LQ_SS_BFER_ALLOWED;
+               goto out;
+       }
+
+       IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
+                      bfer_mvmsta->sta_id);
+
+       /* Disallow BFER on another STA if active and we're a higher priority */
+       if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
+               struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+               u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
+
+               bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
+               bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
+               iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+
+               ss_params |= LQ_SS_BFER_ALLOWED;
+               IWL_DEBUG_RATE(mvm,
+                              "Lower priority BFER sta found (%d). Switch BFER\n",
+                              bfer_mvmsta->sta_id);
+       }
+out:
+       lq_cmd->ss_params = cpu_to_le32(ss_params);
+}
+
 static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
                           struct ieee80211_sta *sta,
                           struct iwl_lq_sta *lq_sta,
@@ -3006,6 +3141,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
 
        rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
 
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+               rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
+
        if (num_of_ant(initial_rate->ant) == 1)
                lq_cmd->single_stream_ant_msk = initial_rate->ant;
 
@@ -3379,9 +3517,73 @@ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
+                                      char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       char buf[12];
+       int bufsz = sizeof(buf);
+       int pos = 0;
+       static const char * const ss_force_name[] = {
+               [RS_SS_FORCE_NONE] = "none",
+               [RS_SS_FORCE_STBC] = "stbc",
+               [RS_SS_FORCE_BFER] = "bfer",
+               [RS_SS_FORCE_SISO] = "siso",
+       };
+
+       pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
+                        ss_force_name[lq_sta->ss_force]);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = lq_sta->pers.drv;
+       int ret = 0;
+
+       if (!strncmp("none", buf, 4)) {
+               lq_sta->ss_force = RS_SS_FORCE_NONE;
+       } else if (!strncmp("siso", buf, 4)) {
+               lq_sta->ss_force = RS_SS_FORCE_SISO;
+       } else if (!strncmp("stbc", buf, 4)) {
+               if (lq_sta->stbc_capable) {
+                       lq_sta->ss_force = RS_SS_FORCE_STBC;
+               } else {
+                       IWL_ERR(mvm,
+                               "can't force STBC. peer doesn't support\n");
+                       ret = -EINVAL;
+               }
+       } else if (!strncmp("bfer", buf, 4)) {
+               if (lq_sta->bfer_capable) {
+                       lq_sta->ss_force = RS_SS_FORCE_BFER;
+               } else {
+                       IWL_ERR(mvm,
+                               "can't force BFER. peer doesn't support\n");
+                       ret = -EINVAL;
+               }
+       } else {
+               IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
+               ret = -EINVAL;
+       }
+       return ret ?: count;
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+       _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
+#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do {               \
+               if (!debugfs_create_file(#name, mode, parent, lq_sta,   \
+                                        &iwl_dbgfs_##name##_ops))      \
+                       goto err;                                       \
+       } while (0)
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
+
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = mvm_sta;
+
        debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
                            lq_sta, &rs_sta_dbgfs_scale_table_ops);
        debugfs_create_file("rate_stats_table", S_IRUSR, dir,
@@ -3392,6 +3594,11 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
                          &lq_sta->tx_agg_tid_en);
        debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
                          &lq_sta->pers.dbg_fixed_txp_reduction);
+
+       MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR);
+       return;
+err:
+       IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
 }
 
 static void rs_remove_debugfs(void *mvm, void *mvm_sta)
index f8f5bf2..dc4ef3d 100644 (file)
@@ -240,6 +240,13 @@ enum rs_column {
        RS_COLUMN_INVALID,
 };
 
+enum rs_ss_force_opt {
+       RS_SS_FORCE_NONE = 0,
+       RS_SS_FORCE_STBC,
+       RS_SS_FORCE_BFER,
+       RS_SS_FORCE_SISO,
+};
+
 /* Packet stats per rate */
 struct rs_rate_stats {
        u64 success;
@@ -293,7 +300,9 @@ struct iwl_lq_sta {
        u64 last_tx;
        bool is_vht;
        bool ldpc;              /* LDPC Rx is supported by the STA */
-       bool stbc;              /* Tx STBC is supported by chip and Rx by STA */
+       bool stbc_capable;      /* Tx STBC is supported by chip and Rx by STA */
+       bool bfer_capable;      /* Remote is beamformee-capable and we can beamform */
+
        enum ieee80211_band band;
 
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -322,6 +331,9 @@ struct iwl_lq_sta {
        /* tx power reduce for this sta */
        int tpc_reduce;
 
+       /* force STBC/BFER/SISO for testing */
+       enum rs_ss_force_opt ss_force;
+
        /* persistent fields - initialized only once - keep last! */
        struct lq_sta_pers {
 #ifdef CONFIG_MAC80211_DEBUGFS
index 3bd5f34..7e9aa3c 100644 (file)
@@ -704,7 +704,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
        }
 
-       mvm->last_ebs_successful = !ebs_status;
+       if (ebs_status)
+               mvm->last_ebs_successful = false;
 
        return 0;
 }
@@ -1682,10 +1683,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 
        band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
        for (i = 0; i < band->n_channels; i++, j++)
-               scan_config->channel_array[j] = band->channels[i].center_freq;
+               scan_config->channel_array[j] = band->channels[i].hw_value;
        band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
        for (i = 0; i < band->n_channels; i++, j++)
-               scan_config->channel_array[j] = band->channels[i].center_freq;
+               scan_config->channel_array[j] = band->channels[i].hw_value;
 
        cmd.data[0] = scan_config;
        cmd.len[0] = cmd_size;
@@ -1862,6 +1863,13 @@ int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
 
        cmd->general_flags = cpu_to_le32(flags);
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+           mvm->last_ebs_successful)
+               cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+                                    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
        cmd->n_channels = req->req.n_channels;
 
        for (i = 0; i < req->req.n_ssids; i++)
@@ -2025,7 +2033,9 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                       notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
                                "success" : "failed");
 
-       mvm->last_ebs_successful = !notif->ebs_status;
+       if (notif->ebs_status)
+               mvm->last_ebs_successful = false;
+
        mvm->scan_uid[uid_idx] = 0;
 
        if (!sched) {
@@ -2058,10 +2068,14 @@ static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
 
        /*
         * Clear scan uid of scans that was aborted from above and completed
-        * in FW so the RX handler does nothing.
+        * in FW so the RX handler does nothing. Set last_ebs_successful here if
+        * needed.
         */
        scan_done->mvm->scan_uid[uid_idx] = 0;
 
+       if (notif->ebs_status)
+               scan_done->mvm->last_ebs_successful = false;
+
        return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
 }
 
index 14a8484..5c23cdd 100644 (file)
@@ -209,6 +209,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 {
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+                                       mvm->cfg->base_params->wd_timeout :
+                                       IWL_WATCHDOG_DISABLED;
        u32 ac;
 
        lockdep_assert_held(&mvm->mutex);
@@ -232,7 +235,7 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-                                     iwl_mvm_ac_to_tx_fifo[ac]);
+                                     iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }
 
@@ -626,13 +629,16 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 {
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+                                       mvm->cfg->base_params->wd_timeout :
+                                       IWL_WATCHDOG_DISABLED;
        int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Map Aux queue to fifo - needs to happen before adding Aux station */
        iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
-                             IWL_MVM_TX_FIFO_MCAST);
+                             IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
 
        /* Allocate aux station and assign to it the aux queue */
        ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -965,6 +971,9 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+                                       mvm->cfg->base_params->wd_timeout :
+                                       IWL_WATCHDOG_DISABLED;
        int queue, fifo, ret;
        u16 ssn;
 
@@ -988,7 +997,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
 
        iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-                              buf_size, ssn);
+                              buf_size, ssn, wdg_timeout);
 
        /*
         * Even though in theory the peer could have different
index c0e00ba..a87b506 100644 (file)
@@ -64,6 +64,8 @@
 #include <linux/etherdevice.h>
 #include "mvm.h"
 #include "time-event.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
 
 #define TU_TO_US(x) (x * 1024)
 #define TU_TO_MS(x) (TU_TO_US(x) / 1000)
@@ -228,6 +230,8 @@ iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
                return "IDLE";
        case IWL_MVM_TDLS_SW_REQ_SENT:
                return "REQ SENT";
+       case IWL_MVM_TDLS_SW_RESP_RCVD:
+               return "RESP RECEIVED";
        case IWL_MVM_TDLS_SW_REQ_RCVD:
                return "REQ RECEIVED";
        case IWL_MVM_TDLS_SW_ACTIVE:
@@ -248,6 +252,11 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                       iwl_mvm_tdls_cs_state_str(state));
        mvm->tdls_cs.state = state;
 
+       /* we only send requests to our switching peer - update sent time */
+       if (state == IWL_MVM_TDLS_SW_REQ_SENT)
+               mvm->tdls_cs.peer.sent_timestamp =
+                       iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
        if (state == IWL_MVM_TDLS_SW_IDLE)
                mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
@@ -300,7 +309,7 @@ out:
 static int
 iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
                          enum iwl_tdls_channel_switch_type type,
-                         const u8 *peer, bool peer_initiator)
+                         const u8 *peer, bool peer_initiator, u32 timestamp)
 {
        bool same_peer = false;
        int ret = 0;
@@ -325,17 +334,30 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
                        ret = -EINVAL;
                break;
        case IWL_MVM_TDLS_SW_REQ_SENT:
+               /* only allow requests from the same peer */
+               if (!same_peer)
+                       ret = -EBUSY;
+               else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
+                        !peer_initiator)
+                       /*
+                        * We received a ch-switch request while an outgoing
+                        * one is pending. Allow it if the peer is the link
+                        * initiator.
+                        */
+                       ret = -EBUSY;
+               else if (type == TDLS_SEND_CHAN_SW_REQ)
+                       /* wait for idle before sending another request */
+                       ret = -EBUSY;
+               else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+                       /* we got a stale response - ignore it */
+                       ret = -EINVAL;
+               break;
+       case IWL_MVM_TDLS_SW_RESP_RCVD:
                /*
-                * We received a ch-switch request while an outgoing one is
-                * pending. Allow it to proceed if the other peer is the same
-                * one we sent to, and we are not the link initiator.
+                * we are waiting for the FW to give an "active" notification,
+                * so ignore requests in the meantime
                 */
-               if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
-                       if (!same_peer)
-                               ret = -EBUSY;
-                       else if (!peer_initiator) /* we are the initiator */
-                               ret = -EBUSY;
-               }
+               ret = -EBUSY;
                break;
        case IWL_MVM_TDLS_SW_REQ_RCVD:
                /* as above, allow the link initiator to proceed */
@@ -349,9 +371,12 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
                }
                break;
        case IWL_MVM_TDLS_SW_ACTIVE:
-               /* we don't allow initiations during active channel switch */
-               if (type == TDLS_SEND_CHAN_SW_REQ)
-                       ret = -EINVAL;
+               /*
+                * the only valid request when active is a request to return
+                * to the base channel by the current off-channel peer
+                */
+               if (type != TDLS_MOVE_CH || !same_peer)
+                       ret = -EBUSY;
                break;
        }
 
@@ -384,7 +409,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
+       ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
+                                       timestamp);
        if (ret)
                return ret;
 
@@ -473,6 +499,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
                                             type == TDLS_SEND_CHAN_SW_REQ ?
                                             IWL_MVM_TDLS_SW_REQ_SENT :
                                             IWL_MVM_TDLS_SW_REQ_RCVD);
+       } else {
+               iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
        }
 
 out:
@@ -657,12 +685,15 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        enum iwl_tdls_channel_switch_type type;
        unsigned int delay;
+       const char *action_str =
+               params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
+               "REQ" : "RESP";
 
        mutex_lock(&mvm->mutex);
 
        IWL_DEBUG_TDLS(mvm,
-                      "Received TDLS ch switch action %d from %pM status %d\n",
-                      params->action_code, params->sta->addr, params->status);
+                      "Received TDLS ch switch action %s from %pM status %d\n",
+                      action_str, params->sta->addr, params->status);
 
        /*
         * we got a non-zero status from a peer we were switching to - move to
index 4eb3cad..8decf99 100644 (file)
@@ -432,7 +432,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
                        mvm->status, table.valid);
        }
 
-       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+       IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
                desc_lookup(table.error_id));
        IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
        IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
@@ -531,7 +531,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 }
 
 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-                       const struct iwl_trans_txq_scd_cfg *cfg)
+                       const struct iwl_trans_txq_scd_cfg *cfg,
+                       unsigned int wdg_timeout)
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
@@ -545,11 +546,12 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
        };
 
        if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
-               iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg);
+               iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
+                                        wdg_timeout);
                return;
        }
 
-       iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL);
+       iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
        WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
             "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
 }
index e5652d8..cae0eb8 100644 (file)
@@ -216,6 +216,7 @@ struct iwl_pcie_txq_scratch_buf {
  * @need_update: indicates need to update read/write index
  * @active: stores if queue is active
  * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
        bool need_update;
        u8 active;
        bool ampdu;
+       unsigned long wd_timeout;
 };
 
 static inline dma_addr_t
@@ -259,7 +261,6 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @rx_page_order: page order for receive buffer size
- * @wd_timeout: queue watchdog timeout (jiffies)
  * @reg_lock: protect hw register access
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
@@ -302,6 +303,7 @@ struct iwl_trans_pcie {
 
        u8 cmd_queue;
        u8 cmd_fifo;
+       unsigned int cmd_q_wdg_timeout;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 
@@ -312,9 +314,6 @@ struct iwl_trans_pcie {
 
        const char *const *command_names;
 
-       /* queue watchdog */
-       unsigned long wd_timeout;
-
        /*protect hw register */
        spinlock_t reg_lock;
        bool cmd_in_flight;
@@ -373,7 +372,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
-                              const struct iwl_trans_txq_scd_cfg *cfg);
+                              const struct iwl_trans_txq_scd_cfg *cfg,
+                              unsigned int wdg_timeout);
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
index 1ff8767..69935aa 100644 (file)
@@ -75,6 +75,7 @@
 #include "iwl-trans.h"
 #include "iwl-csr.h"
 #include "iwl-prph.h"
+#include "iwl-scd.h"
 #include "iwl-agn-hw.h"
 #include "iwl-fw-error-dump.h"
 #include "internal.h"
@@ -1268,6 +1269,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 
        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
        trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+       trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
        else
@@ -1282,9 +1284,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);
 
-       trans_pcie->wd_timeout =
-               msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
-
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
index d40cd4a..af0bce7 100644 (file)
@@ -147,7 +147,6 @@ static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
 static void iwl_pcie_txq_stuck_timer(unsigned long data)
 {
        struct iwl_txq *txq = (void *)data;
-       struct iwl_queue *q = &txq->q;
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
        u32 scd_sram_addr = trans_pcie->scd_base_addr +
@@ -164,7 +163,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
        spin_unlock(&txq->lock);
 
        IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
-               jiffies_to_msecs(trans_pcie->wd_timeout));
+               jiffies_to_msecs(txq->wd_timeout));
        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);
 
@@ -198,11 +197,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
        }
 
-       for (i = q->read_ptr; i != q->write_ptr;
-            i = iwl_queue_inc_wrap(i))
-               IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-                       le32_to_cpu(txq->scratchbufs[i].scratch));
-
        iwl_force_nmi(trans);
 }
 
@@ -680,7 +674,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
                iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
        iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
-                               trans_pcie->cmd_fifo);
+                               trans_pcie->cmd_fifo,
+                               trans_pcie->cmd_q_wdg_timeout);
 
        /* Activate all Tx DMA/FIFO channels */
        iwl_scd_activate_fifos(trans);
@@ -722,7 +717,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
        iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
                           trans_pcie->kw.dma >> 4);
 
-       iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+       /*
+        * Send 0 as the scd_base_addr since the device may have been reset
+        * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
+        * contain garbage.
+        */
+       iwl_pcie_tx_start(trans, 0);
 }
 
 /*
@@ -898,6 +898,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                }
        }
 
+       if (trans->cfg->base_params->num_of_queues > 20)
+               iwl_set_bits_prph(trans, SCD_GP_CTRL,
+                                 SCD_GP_CTRL_ENABLE_31_QUEUES);
+
        return 0;
 error:
        /*Upon error, free only if we allocated something */
@@ -906,10 +910,9 @@ error:
        return ret;
 }
 
-static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
-                                          struct iwl_txq *txq)
+static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
 {
-       if (!trans_pcie->wd_timeout)
+       if (!txq->wd_timeout)
                return;
 
        /*
@@ -919,7 +922,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
        if (txq->q.read_ptr == txq->q.write_ptr)
                del_timer(&txq->stuck_timer);
        else
-               mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+               mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 }
 
 /* Frees buffers until index _not_ inclusive */
@@ -981,7 +984,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                iwl_pcie_txq_free_tfd(trans, txq);
        }
 
-       iwl_pcie_txq_progress(trans_pcie, txq);
+       iwl_pcie_txq_progress(txq);
 
        if (iwl_queue_space(&txq->q) > txq->q.low_mark)
                iwl_wake_queue(trans, txq);
@@ -1109,7 +1112,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
                spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
        }
 
-       iwl_pcie_txq_progress(trans_pcie, txq);
+       iwl_pcie_txq_progress(txq);
 }
 
 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
@@ -1142,14 +1145,18 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
 #define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
 
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
-                              const struct iwl_trans_txq_scd_cfg *cfg)
+                              const struct iwl_trans_txq_scd_cfg *cfg,
+                              unsigned int wdg_timeout)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_txq *txq = &trans_pcie->txq[txq_id];
        int fifo = -1;
 
        if (test_and_set_bit(txq_id, trans_pcie->queue_used))
                WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
+       txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
+
        if (cfg) {
                fifo = cfg->fifo;
 
@@ -1173,7 +1180,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 
                        /* enable aggregations for the queue */
                        iwl_scd_txq_enable_agg(trans, txq_id);
-                       trans_pcie->txq[txq_id].ampdu = true;
+                       txq->ampdu = true;
                } else {
                        /*
                         * disable aggregations for the queue, this will also
@@ -1182,14 +1189,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
                         */
                        iwl_scd_txq_disable_agg(trans, txq_id);
 
-                       ssn = trans_pcie->txq[txq_id].q.read_ptr;
+                       ssn = txq->q.read_ptr;
                }
        }
 
        /* Place first TFD at index corresponding to start sequence number.
         * Assumes that ssn_idx is valid (!= 0xFFF) */
-       trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
-       trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+       txq->q.read_ptr = (ssn & 0xff);
+       txq->q.write_ptr = (ssn & 0xff);
        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                           (ssn & 0xff) | (txq_id << 8));
 
@@ -1230,7 +1237,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
                                    txq_id, ssn & 0xff);
        }
 
-       trans_pcie->txq[txq_id].active = true;
+       txq->active = true;
 }
 
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
@@ -1495,8 +1502,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
        /* start timer if queue currently empty */
-       if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
-               mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+       if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+               mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 
        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
@@ -1846,9 +1853,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
-               if (txq->need_update && trans_pcie->wd_timeout)
-                       mod_timer(&txq->stuck_timer,
-                                 jiffies + trans_pcie->wd_timeout);
+               if (txq->wd_timeout)
+                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }