1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
67 #include <net/mac80211.h>
69 #include "iwl-debug.h"
74 #include "fw/api/rs.h"
77 * Will return 0 even if the cmd failed when RFKILL is asserted unless
78 * CMD_WANT_SKB is set in cmd->flags.
80 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
84 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
85 if (WARN_ON(mvm->d3_test_active))
90 * Synchronous commands from this op-mode must hold
91 * the mutex; this ensures we don't try to send two
92 * (or more) synchronous commands at a time.
94 if (!(cmd->flags & CMD_ASYNC)) {
95 lockdep_assert_held(&mvm->mutex);
96 if (!(cmd->flags & CMD_SEND_IN_IDLE))
97 iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
100 ret = iwl_trans_send_cmd(mvm->trans, cmd);
102 if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
103 iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
106 * If the caller wants the SKB, don't hide any problems: the caller
107 * might access the response buffer, which will be NULL if the
108 * command failed.
110 if (cmd->flags & CMD_WANT_SKB)
113 /* Silently ignore failures if RFKILL is asserted */
114 if (!ret || ret == -ERFKILL)
119 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
120 u32 flags, u16 len, const void *data)
122 struct iwl_host_cmd cmd = {
129 return iwl_mvm_send_cmd(mvm, &cmd);
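/*
 * For illustration only, a typical caller of the helper above might look
 * like this (ECHO_CMD is assumed here to stand for any command id that
 * carries no payload):
 *
 *	int ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
 *
 *	if (ret)
 *		IWL_ERR(mvm, "sending command failed: %d\n", ret);
 *
 * Note that, as documented above iwl_mvm_send_cmd(), ret is 0 under RFKILL
 * unless CMD_WANT_SKB was passed in the flags.
 */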
133 * We assume that the caller set the status to the success value
135 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
138 struct iwl_rx_packet *pkt;
139 struct iwl_cmd_response *resp;
142 lockdep_assert_held(&mvm->mutex);
144 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
145 if (WARN_ON(mvm->d3_test_active))
150 * Only synchronous commands can wait for status; we use CMD_WANT_SKB
151 * internally here, so the caller must not set it.
153 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
154 "cmd flags %x", cmd->flags))
157 cmd->flags |= CMD_WANT_SKB;
159 ret = iwl_trans_send_cmd(mvm->trans, cmd);
160 if (ret == -ERFKILL) {
162 * The command failed because of RFKILL; don't update
163 * the status - leave it as success and return 0.
172 resp_len = iwl_rx_packet_payload_len(pkt);
173 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
178 resp = (void *)pkt->data;
179 *status = le32_to_cpu(resp->status);
186 * We assume that the caller set the status to the success value
188 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
189 const void *data, u32 *status)
191 struct iwl_host_cmd cmd = {
197 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
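/*
 * Illustrative use of the status variant, assuming a command whose response
 * is a single __le32 status word and an assumed OK value of 0 (SOME_CMD and
 * data are placeholders):
 *
 *	u32 status = 0;		// caller pre-sets the success value
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, SOME_CMD, sizeof(data),
 *					      &data, &status);
 *
 *	if (ret)
 *		return ret;	// transport-level failure
 *	if (status)		// firmware reported an error
 *		IWL_ERR(mvm, "cmd rejected, status %d\n", status);
 */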
200 #define IWL_DECLARE_RATE_INFO(r) \
201 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
204 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
206 static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
207 IWL_DECLARE_RATE_INFO(1),
208 IWL_DECLARE_RATE_INFO(2),
209 IWL_DECLARE_RATE_INFO(5),
210 IWL_DECLARE_RATE_INFO(11),
211 IWL_DECLARE_RATE_INFO(6),
212 IWL_DECLARE_RATE_INFO(9),
213 IWL_DECLARE_RATE_INFO(12),
214 IWL_DECLARE_RATE_INFO(18),
215 IWL_DECLARE_RATE_INFO(24),
216 IWL_DECLARE_RATE_INFO(36),
217 IWL_DECLARE_RATE_INFO(48),
218 IWL_DECLARE_RATE_INFO(54),
221 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
222 enum nl80211_band band)
224 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
228 /* Legacy rate format, search for match in table */
229 if (band == NL80211_BAND_5GHZ)
230 band_offset = IWL_FIRST_OFDM_RATE;
231 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
232 if (fw_rate_idx_to_plcp[idx] == rate)
233 return idx - band_offset;
238 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
240 /* Get PLCP rate for tx_cmd->rate_n_flags */
241 return fw_rate_idx_to_plcp[rate_idx];
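/*
 * A rough sketch of how these two helpers relate (purely illustrative):
 *
 *	// firmware -> mac80211: take the legacy PLCP value from
 *	// rate_n_flags and turn it into an index into the band's bitrates
 *	int idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, band);
 *
 *	// mac80211 -> firmware: look the PLCP value back up for tx_cmd
 *	u8 plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
 *
 * On the 5 GHz band the search above starts at IWL_FIRST_OFDM_RATE, so the
 * CCK entries at the head of the table are skipped and the returned index
 * is relative to the first OFDM rate.
 */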
244 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
246 struct iwl_rx_packet *pkt = rxb_addr(rxb);
247 struct iwl_error_resp *err_resp = (void *)pkt->data;
249 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
250 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
251 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
252 le16_to_cpu(err_resp->bad_cmd_seq_num),
253 le32_to_cpu(err_resp->error_service));
254 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
255 le64_to_cpu(err_resp->timestamp));
259 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
260 * The parameter should also be a combination of ANT_[ABC].
262 u8 first_antenna(u8 mask)
264 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
265 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
267 return BIT(ffs(mask) - 1);
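/*
 * Worked example (assuming ANT_B is BIT(1) and ANT_C is BIT(2), consistent
 * with ANT_A being BIT(0) as asserted above):
 *
 *	first_antenna(ANT_B | ANT_C) == ANT_B
 *
 * ffs(0x6) is 2, so BIT(2 - 1) == BIT(1) == ANT_B.
 */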
271 * Toggles between TX antennas to send the probe request on.
272 * Receives the bitmask of valid TX antennas and the *index* used
273 * for the last TX, and returns the next valid *index* to use.
274 * To set it in the tx_cmd, use BIT(idx).
276 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
281 for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
282 ind = (ind + 1) % RATE_MCS_ANT_NUM;
283 if (valid & BIT(ind))
287 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
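/*
 * Illustrative toggling sequence for a NIC with antennas A and C valid
 * (valid == ANT_A | ANT_C, i.e. bits 0 and 2, taking RATE_MCS_ANT_NUM to
 * be 3):
 *
 *	idx = iwl_mvm_next_antenna(mvm, ANT_A | ANT_C, 0);	// -> 2
 *	idx = iwl_mvm_next_antenna(mvm, ANT_A | ANT_C, idx);	// -> 0
 *
 * The caller then puts BIT(idx) into the rate_n_flags antenna field.
 */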
291 static const struct {
294 } advanced_lookup[] = {
295 { "NMI_INTERRUPT_WDG", 0x34 },
296 { "SYSASSERT", 0x35 },
297 { "UCODE_VERSION_MISMATCH", 0x37 },
298 { "BAD_COMMAND", 0x38 },
299 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
300 { "FATAL_ERROR", 0x3D },
301 { "NMI_TRM_HW_ERR", 0x46 },
302 { "NMI_INTERRUPT_TRM", 0x4C },
303 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
304 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
305 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
306 { "NMI_INTERRUPT_HOST", 0x66 },
307 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
308 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
309 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
310 { "ADVANCED_SYSASSERT", 0 },
313 static const char *desc_lookup(u32 num)
317 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
318 if (advanced_lookup[i].num == num)
319 return advanced_lookup[i].name;
321 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
322 return advanced_lookup[i].name;
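/*
 * For example, desc_lookup(0x38) returns "BAD_COMMAND", while any value not
 * present in the table (say 0x1234) falls through to the last entry and
 * returns "ADVANCED_SYSASSERT".
 */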
326 * Note: This structure is read from the device with IO accesses,
327 * and the reading already does the endian conversion. As it is
328 * read with u32-sized accesses, any members with a different size
329 * need to be ordered correctly though!
331 struct iwl_error_event_table_v1 {
332 u32 valid; /* (nonzero) valid, (0) log is empty */
333 u32 error_id; /* type of error */
334 u32 pc; /* program counter */
335 u32 blink1; /* branch link */
336 u32 blink2; /* branch link */
337 u32 ilink1; /* interrupt link */
338 u32 ilink2; /* interrupt link */
339 u32 data1; /* error-specific data */
340 u32 data2; /* error-specific data */
341 u32 data3; /* error-specific data */
342 u32 bcon_time; /* beacon timer */
343 u32 tsf_low; /* network timestamp function timer */
344 u32 tsf_hi; /* network timestamp function timer */
345 u32 gp1; /* GP1 timer register */
346 u32 gp2; /* GP2 timer register */
347 u32 gp3; /* GP3 timer register */
348 u32 ucode_ver; /* uCode version */
349 u32 hw_ver; /* HW Silicon version */
350 u32 brd_ver; /* HW board version */
351 u32 log_pc; /* log program counter */
352 u32 frame_ptr; /* frame pointer */
353 u32 stack_ptr; /* stack pointer */
354 u32 hcmd; /* last host command header */
355 u32 isr0; /* isr status register LMPM_NIC_ISR0:
357 u32 isr1; /* isr status register LMPM_NIC_ISR1:
359 u32 isr2; /* isr status register LMPM_NIC_ISR2:
361 u32 isr3; /* isr status register LMPM_NIC_ISR3:
363 u32 isr4; /* isr status register LMPM_NIC_ISR4:
365 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
366 u32 wait_event; /* wait event() caller address */
367 u32 l2p_control; /* L2pControlField */
368 u32 l2p_duration; /* L2pDurationField */
369 u32 l2p_mhvalid; /* L2pMhValidBits */
370 u32 l2p_addr_match; /* L2pAddrMatchStat */
371 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
373 u32 u_timestamp; /* indicate when the date and time of the
375 u32 flow_handler; /* FH read/write pointers, RX credit */
376 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
378 struct iwl_error_event_table {
379 u32 valid; /* (nonzero) valid, (0) log is empty */
380 u32 error_id; /* type of error */
381 u32 trm_hw_status0; /* TRM HW status */
382 u32 trm_hw_status1; /* TRM HW status */
383 u32 blink2; /* branch link */
384 u32 ilink1; /* interrupt link */
385 u32 ilink2; /* interrupt link */
386 u32 data1; /* error-specific data */
387 u32 data2; /* error-specific data */
388 u32 data3; /* error-specific data */
389 u32 bcon_time; /* beacon timer */
390 u32 tsf_low; /* network timestamp function timer */
391 u32 tsf_hi; /* network timestamp function timer */
392 u32 gp1; /* GP1 timer register */
393 u32 gp2; /* GP2 timer register */
394 u32 fw_rev_type; /* firmware revision type */
395 u32 major; /* uCode version major */
396 u32 minor; /* uCode version minor */
397 u32 hw_ver; /* HW Silicon version */
398 u32 brd_ver; /* HW board version */
399 u32 log_pc; /* log program counter */
400 u32 frame_ptr; /* frame pointer */
401 u32 stack_ptr; /* stack pointer */
402 u32 hcmd; /* last host command header */
403 u32 isr0; /* isr status register LMPM_NIC_ISR0:
405 u32 isr1; /* isr status register LMPM_NIC_ISR1:
407 u32 isr2; /* isr status register LMPM_NIC_ISR2:
409 u32 isr3; /* isr status register LMPM_NIC_ISR3:
411 u32 isr4; /* isr status register LMPM_NIC_ISR4:
413 u32 last_cmd_id; /* last HCMD id handled by the firmware */
414 u32 wait_event; /* wait event() caller address */
415 u32 l2p_control; /* L2pControlField */
416 u32 l2p_duration; /* L2pDurationField */
417 u32 l2p_mhvalid; /* L2pMhValidBits */
418 u32 l2p_addr_match; /* L2pAddrMatchStat */
419 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
421 u32 u_timestamp; /* indicate when the date and time of the
423 u32 flow_handler; /* FH read/write pointers, RX credit */
424 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
427 * UMAC error struct - relevant starting with the 8000 family of devices.
428 * Note: This structure is read from the device with IO accesses,
429 * and the reading already does the endian conversion. As it is
430 * read with u32-sized accesses, any members with a different size
431 * need to be ordered correctly though!
433 struct iwl_umac_error_event_table {
434 u32 valid; /* (nonzero) valid, (0) log is empty */
435 u32 error_id; /* type of error */
436 u32 blink1; /* branch link */
437 u32 blink2; /* branch link */
438 u32 ilink1; /* interrupt link */
439 u32 ilink2; /* interrupt link */
440 u32 data1; /* error-specific data */
441 u32 data2; /* error-specific data */
442 u32 data3; /* error-specific data */
445 u32 frame_pointer; /* core register 27*/
446 u32 stack_pointer; /* core register 28 */
447 u32 cmd_header; /* latest host cmd sent to UMAC */
448 u32 nic_isr_pref; /* ISR status register */
451 #define ERROR_START_OFFSET (1 * sizeof(u32))
452 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
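/*
 * With ERROR_START_OFFSET == 4 and ERROR_ELEM_SIZE == 28, the
 * "ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE" checks below reduce
 * to "table.valid != 0", i.e. the dump header is only printed when the
 * error log actually contains an entry.
 */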
454 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
456 struct iwl_trans *trans = mvm->trans;
457 struct iwl_umac_error_event_table table;
459 if (!mvm->support_umac_log)
462 iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
465 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
466 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
467 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
468 mvm->status, table.valid);
471 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
472 desc_lookup(table.error_id));
473 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
474 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
475 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
476 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
477 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
478 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
479 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
480 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
481 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
482 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
483 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
484 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
485 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
488 static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
490 struct iwl_trans *trans = mvm->trans;
491 struct iwl_error_event_table table;
494 if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
496 base = mvm->fw->init_errlog_ptr;
499 base = mvm->fw->inst_errlog_ptr;
502 if (base < 0x400000) {
504 "Not valid error log pointer 0x%08X for %s uCode\n",
506 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
511 /* check if there is a HW error */
512 val = iwl_trans_read_mem32(trans, base);
513 if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
516 IWL_ERR(trans, "HW error, resetting before reading\n");
518 /* reset the device */
519 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
520 usleep_range(5000, 6000);
522 /* set INIT_DONE flag */
523 iwl_set_bit(trans, CSR_GP_CNTRL,
524 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
526 /* and wait for clock stabilization */
527 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
530 err = iwl_poll_bit(trans, CSR_GP_CNTRL,
531 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
532 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
535 IWL_DEBUG_INFO(trans,
536 "Failed to reset the card for the dump\n");
541 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
543 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
544 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
545 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
546 mvm->status, table.valid);
549 /* Do not change this output - scripts rely on it */
551 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
553 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
554 table.data1, table.data2, table.data3,
555 table.blink2, table.ilink1,
556 table.ilink2, table.bcon_time, table.gp1,
557 table.gp2, table.fw_rev_type, table.major,
558 table.minor, table.hw_ver, table.brd_ver);
559 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
560 desc_lookup(table.error_id));
561 IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
562 IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
563 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
564 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
565 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
566 IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
567 IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
568 IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
569 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
570 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
571 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
572 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
573 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
574 IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
575 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
576 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
577 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
578 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
579 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
580 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
581 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
582 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
583 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
584 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
585 IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
586 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
587 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
588 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
589 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
590 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
591 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
592 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
593 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
596 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
598 iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
600 if (mvm->error_event_table[1])
601 iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
603 iwl_mvm_dump_umac_error_log(mvm);
606 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
610 lockdep_assert_held(&mvm->queue_info_lock);
612 /* This should not be hit with new TX path */
613 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
616 /* Start by looking for a free queue */
617 for (i = minq; i <= maxq; i++)
618 if (mvm->queue_info[i].hw_queue_refcount == 0 &&
619 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
623 * If no free queue was found, settle for an inactive one to reconfigure.
624 * Make sure that the inactive queue either already belongs to this STA,
625 * or, if it belongs to another one, that it isn't that STA's reserved queue.
627 for (i = minq; i <= maxq; i++)
628 if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
629 (sta_id == mvm->queue_info[i].ra_sta_id ||
630 !mvm->queue_info[i].reserved))
636 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
637 int tid, int frame_limit, u16 ssn)
639 struct iwl_scd_txq_cfg_cmd cmd = {
641 .action = SCD_CFG_ENABLE_QUEUE,
642 .window = frame_limit,
644 .ssn = cpu_to_le16(ssn),
646 .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
647 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
652 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
655 spin_lock_bh(&mvm->queue_info_lock);
656 if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
657 "Trying to reconfig unallocated queue %d\n", queue)) {
658 spin_unlock_bh(&mvm->queue_info_lock);
661 spin_unlock_bh(&mvm->queue_info_lock);
663 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
665 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
666 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
672 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
673 int mac80211_queue, u8 sta_id, u8 tid)
675 bool enable_queue = true;
677 spin_lock_bh(&mvm->queue_info_lock);
679 /* Make sure this TID isn't already enabled */
680 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
681 spin_unlock_bh(&mvm->queue_info_lock);
682 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
687 /* Update mappings and refcounts */
688 if (mvm->queue_info[queue].hw_queue_refcount > 0)
689 enable_queue = false;
691 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
692 WARN(mac80211_queue >=
693 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
694 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
695 mac80211_queue, queue, sta_id, tid);
696 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
699 mvm->queue_info[queue].hw_queue_refcount++;
700 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
701 mvm->queue_info[queue].ra_sta_id = sta_id;
704 if (tid != IWL_MAX_TID_COUNT)
705 mvm->queue_info[queue].mac80211_ac =
706 tid_to_mac80211_ac[tid];
708 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
710 mvm->queue_info[queue].txq_tid = tid;
713 IWL_DEBUG_TX_QUEUES(mvm,
714 "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
715 queue, mvm->queue_info[queue].hw_queue_refcount,
716 mvm->hw_queue_to_mac80211[queue]);
718 spin_unlock_bh(&mvm->queue_info_lock);
723 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
724 u8 sta_id, u8 tid, unsigned int timeout)
726 struct iwl_tx_queue_cfg_cmd cmd = {
727 .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
733 if (cmd.tid == IWL_MAX_TID_COUNT)
734 cmd.tid = IWL_MGMT_TID;
735 queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
736 SCD_QUEUE_CFG, timeout);
739 IWL_DEBUG_TX_QUEUES(mvm,
740 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
745 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
748 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
749 IWL_DEBUG_TX_QUEUES(mvm,
750 "Enabling TXQ #%d (mac80211 map:0x%x)\n",
751 queue, mvm->hw_queue_to_mac80211[queue]);
756 bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
757 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
758 unsigned int wdg_timeout)
760 struct iwl_scd_txq_cfg_cmd cmd = {
762 .action = SCD_CFG_ENABLE_QUEUE,
763 .window = cfg->frame_limit,
764 .sta_id = cfg->sta_id,
765 .ssn = cpu_to_le16(ssn),
766 .tx_fifo = cfg->fifo,
767 .aggregate = cfg->aggregate,
772 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
775 /* Send the enabling command if we need to */
776 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
777 cfg->sta_id, cfg->tid))
780 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
783 le16_add_cpu(&cmd.ssn, 1);
785 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
786 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
791 int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
794 struct iwl_scd_txq_cfg_cmd cmd = {
796 .action = SCD_CFG_DISABLE_QUEUE,
798 bool remove_mac_queue = true;
801 if (iwl_mvm_has_new_tx_api(mvm)) {
802 spin_lock_bh(&mvm->queue_info_lock);
803 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
804 spin_unlock_bh(&mvm->queue_info_lock);
806 iwl_trans_txq_free(mvm->trans, queue);
811 spin_lock_bh(&mvm->queue_info_lock);
813 if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
814 spin_unlock_bh(&mvm->queue_info_lock);
818 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
821 * If there is another TID with the same AC - don't remove the MAC queue
824 if (tid < IWL_MAX_TID_COUNT) {
825 unsigned long tid_bitmap =
826 mvm->queue_info[queue].tid_bitmap;
827 int ac = tid_to_mac80211_ac[tid];
830 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
831 if (tid_to_mac80211_ac[i] == ac)
832 remove_mac_queue = false;
836 if (remove_mac_queue)
837 mvm->hw_queue_to_mac80211[queue] &=
838 ~BIT(mac80211_queue);
839 mvm->queue_info[queue].hw_queue_refcount--;
841 cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
842 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
843 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
844 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
846 IWL_DEBUG_TX_QUEUES(mvm,
847 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
849 mvm->queue_info[queue].hw_queue_refcount,
850 mvm->hw_queue_to_mac80211[queue]);
852 /* If the queue is still enabled - nothing left to do in this func */
853 if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
854 spin_unlock_bh(&mvm->queue_info_lock);
858 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
859 cmd.tid = mvm->queue_info[queue].txq_tid;
861 /* Make sure queue info is correct even though we overwrite it */
862 WARN(mvm->queue_info[queue].hw_queue_refcount ||
863 mvm->queue_info[queue].tid_bitmap ||
864 mvm->hw_queue_to_mac80211[queue],
865 "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
866 queue, mvm->queue_info[queue].hw_queue_refcount,
867 mvm->hw_queue_to_mac80211[queue],
868 mvm->queue_info[queue].tid_bitmap);
870 /* If we are here - the queue is freed and we can zero out these vals */
871 mvm->queue_info[queue].hw_queue_refcount = 0;
872 mvm->queue_info[queue].tid_bitmap = 0;
873 mvm->hw_queue_to_mac80211[queue] = 0;
875 /* Whether or not this was a STA's reserved TXQ, mark it as no longer reserved */
876 mvm->queue_info[queue].reserved = false;
878 spin_unlock_bh(&mvm->queue_info_lock);
880 iwl_trans_txq_disable(mvm->trans, queue, false);
881 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
882 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
885 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
891 * iwl_mvm_send_lq_cmd() - Send link quality command
892 * @init: This command is sent as part of station initialization right
893 * after station has been added.
895 * The link quality command is sent as the last step of station creation.
896 * This is the special case in which init is set: we then invoke a callback
897 * to clear the state indicating that station creation is still in progress.
900 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
902 struct iwl_host_cmd cmd = {
904 .len = { sizeof(struct iwl_lq_cmd), },
905 .flags = init ? 0 : CMD_ASYNC,
909 if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
912 return iwl_mvm_send_cmd(mvm, &cmd);
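/*
 * Sketch of a call site (illustrative; mvmsta->lq_sta.lq is assumed to hold
 * the station's prepared LQ command):
 *
 *	// last step of station creation: sent synchronously, init == true
 *	ret = iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, true);
 *
 *	// later rate-scaling updates: fire-and-forget, init == false
 *	iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
 */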
916 * iwl_mvm_update_smps - Get a request to change the SMPS mode
917 * @req_type: The part of the driver that requested the change.
918 * @smps_request: The requested SMPS mode.
920 * Get a request to change the SMPS mode,
921 * and reconcile it with all the other requests in the driver.
923 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
924 enum iwl_mvm_smps_type_request req_type,
925 enum ieee80211_smps_mode smps_request)
927 struct iwl_mvm_vif *mvmvif;
928 enum ieee80211_smps_mode smps_mode;
931 lockdep_assert_held(&mvm->mutex);
933 /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
934 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
937 if (vif->type == NL80211_IFTYPE_AP)
938 smps_mode = IEEE80211_SMPS_OFF;
940 smps_mode = IEEE80211_SMPS_AUTOMATIC;
942 mvmvif = iwl_mvm_vif_from_mac80211(vif);
943 mvmvif->smps_requests[req_type] = smps_request;
944 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
945 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
946 smps_mode = IEEE80211_SMPS_STATIC;
949 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
950 smps_mode = IEEE80211_SMPS_DYNAMIC;
953 ieee80211_request_smps(vif, smps_mode);
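/*
 * The loop in iwl_mvm_update_smps() above effectively gives STATIC the
 * highest priority: e.g. if the requesters hold { AUTOMATIC, DYNAMIC,
 * STATIC }, the resulting mode is STATIC; with { AUTOMATIC, DYNAMIC } it is
 * DYNAMIC; and with only AUTOMATIC requests the AP/default choice made
 * before the loop is kept.
 */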
956 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
958 struct iwl_statistics_cmd scmd = {
959 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
961 struct iwl_host_cmd cmd = {
962 .id = STATISTICS_CMD,
963 .len[0] = sizeof(scmd),
965 .flags = CMD_WANT_SKB,
969 ret = iwl_mvm_send_cmd(mvm, &cmd);
973 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
977 iwl_mvm_accu_radio_stats(mvm);
982 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
984 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
985 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
986 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
987 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
990 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
991 struct ieee80211_vif *vif)
993 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
994 bool *result = _data;
997 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
998 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
999 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
1004 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
1008 lockdep_assert_held(&mvm->mutex);
1010 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
1013 if (mvm->cfg->rx_with_siso_diversity)
1016 ieee80211_iterate_active_interfaces_atomic(
1017 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1018 iwl_mvm_diversity_iter, &result);
1023 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1026 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1029 lockdep_assert_held(&mvm->mutex);
1031 if (iwl_mvm_vif_low_latency(mvmvif) == prev)
1034 res = iwl_mvm_update_quotas(mvm, false, NULL);
1038 iwl_mvm_bt_coex_vif_change(mvm);
1040 return iwl_mvm_power_update_mac(mvm);
1043 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
1045 bool *result = _data;
1047 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
1051 bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
1053 bool result = false;
1055 ieee80211_iterate_active_interfaces_atomic(
1056 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1057 iwl_mvm_ll_iter, &result);
1062 struct iwl_bss_iter_data {
1063 struct ieee80211_vif *vif;
1067 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
1068 struct ieee80211_vif *vif)
1070 struct iwl_bss_iter_data *data = _data;
1072 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
1083 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
1085 struct iwl_bss_iter_data bss_iter_data = {};
1087 ieee80211_iterate_active_interfaces_atomic(
1088 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1089 iwl_mvm_bss_iface_iterator, &bss_iter_data);
1091 if (bss_iter_data.error) {
1092 IWL_ERR(mvm, "More than one managed interface active!\n");
1093 return ERR_PTR(-EINVAL);
1096 return bss_iter_data.vif;
1099 struct iwl_sta_iter_data {
1103 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
1104 struct ieee80211_vif *vif)
1106 struct iwl_sta_iter_data *data = _data;
1108 if (vif->type != NL80211_IFTYPE_STATION)
1111 if (vif->bss_conf.assoc)
1115 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
1117 struct iwl_sta_iter_data data = {
1121 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
1122 IEEE80211_IFACE_ITER_NORMAL,
1123 iwl_mvm_sta_iface_iterator,
1128 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1129 struct ieee80211_vif *vif,
1130 bool tdls, bool cmd_q)
1132 struct iwl_fw_dbg_trigger_tlv *trigger;
1133 struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
1134 unsigned int default_timeout =
1135 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
1137 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
1139 * We can't know when the station is asleep or awake, so we
1140 * must disable the queue hang detection.
1142 if (fw_has_capa(&mvm->fw->ucode_capa,
1143 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
1144 vif && vif->type == NL80211_IFTYPE_AP)
1145 return IWL_WATCHDOG_DISABLED;
1146 return iwlmvm_mod_params.tfd_q_hang_detect ?
1147 default_timeout : IWL_WATCHDOG_DISABLED;
1150 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
1151 txq_timer = (void *)trigger->data;
1154 return le32_to_cpu(txq_timer->tdls);
1157 return le32_to_cpu(txq_timer->command_queue);
1160 return default_timeout;
1162 switch (ieee80211_vif_type_p2p(vif)) {
1163 case NL80211_IFTYPE_ADHOC:
1164 return le32_to_cpu(txq_timer->ibss);
1165 case NL80211_IFTYPE_STATION:
1166 return le32_to_cpu(txq_timer->bss);
1167 case NL80211_IFTYPE_AP:
1168 return le32_to_cpu(txq_timer->softap);
1169 case NL80211_IFTYPE_P2P_CLIENT:
1170 return le32_to_cpu(txq_timer->p2p_client);
1171 case NL80211_IFTYPE_P2P_GO:
1172 return le32_to_cpu(txq_timer->p2p_go);
1173 case NL80211_IFTYPE_P2P_DEVICE:
1174 return le32_to_cpu(txq_timer->p2p_device);
1177 return mvm->cfg->base_params->wd_timeout;
1181 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1184 struct iwl_fw_dbg_trigger_tlv *trig;
1185 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
1187 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
1190 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
1191 trig_mlme = (void *)trig->data;
1192 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
1193 ieee80211_vif_to_wdev(vif), trig))
1196 if (trig_mlme->stop_connection_loss &&
1197 --trig_mlme->stop_connection_loss)
1200 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
1203 ieee80211_connection_loss(vif);
1207 * Remove inactive TIDs of a given queue.
1208 * If all of the queue's TIDs are inactive - mark the queue as inactive.
1209 * If only some of the queue's TIDs are inactive - unmap them from the queue.
1211 static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1212 struct iwl_mvm_sta *mvmsta, int queue,
1213 unsigned long tid_bitmap)
1217 lockdep_assert_held(&mvmsta->lock);
1218 lockdep_assert_held(&mvm->queue_info_lock);
1220 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1223 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1224 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1225 /* If some TFDs are still queued - don't mark TID as inactive */
1226 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1227 tid_bitmap &= ~BIT(tid);
1229 /* Don't mark as inactive any TID that has an active BA */
1230 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1231 tid_bitmap &= ~BIT(tid);
1234 /* If all TIDs in the queue are inactive - mark queue as inactive. */
1235 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1236 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
1238 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
1239 mvmsta->tid_data[tid].is_tid_active = false;
1241 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
1247 * If we are here, this is a shared queue and not all TIDs timed-out.
1248 * Remove the ones that did.
1250 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1251 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1253 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1254 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
1255 mvm->queue_info[queue].hw_queue_refcount--;
1256 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1257 mvmsta->tid_data[tid].is_tid_active = false;
1259 IWL_DEBUG_TX_QUEUES(mvm,
1260 "Removing inactive TID %d from shared Q:%d\n",
1264 IWL_DEBUG_TX_QUEUES(mvm,
1265 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1266 mvm->queue_info[queue].tid_bitmap);
1269 * There may be different TIDs with the same mac queues, so make
1270 * sure all TIDs have existing corresponding mac queues enabled
1272 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1273 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1274 mvm->hw_queue_to_mac80211[queue] |=
1275 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1278 /* If the queue is marked as shared - "unshare" it */
1279 if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
1280 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1281 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
1282 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1287 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
1289 unsigned long timeout_queues_map = 0;
1290 unsigned long now = jiffies;
1293 if (iwl_mvm_has_new_tx_api(mvm))
1296 spin_lock_bh(&mvm->queue_info_lock);
1297 for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
1298 if (mvm->queue_info[i].hw_queue_refcount > 0)
1299 timeout_queues_map |= BIT(i);
1300 spin_unlock_bh(&mvm->queue_info_lock);
1305 * If a queue times out - mark it as INACTIVE (don't remove it right away
1306 * if we don't have to). This is an optimization in case traffic comes
1307 * later, and we don't HAVE to use a currently-inactive queue.
1309 for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
1310 struct ieee80211_sta *sta;
1311 struct iwl_mvm_sta *mvmsta;
1314 unsigned long inactive_tid_bitmap = 0;
1315 unsigned long queue_tid_bitmap;
1317 spin_lock_bh(&mvm->queue_info_lock);
1318 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1320 /* If TXQ isn't in active use anyway - nothing to do here... */
1321 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1322 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
1323 spin_unlock_bh(&mvm->queue_info_lock);
1327 /* Check to see if there are inactive TIDs on this queue */
1328 for_each_set_bit(tid, &queue_tid_bitmap,
1329 IWL_MAX_TID_COUNT + 1) {
1330 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1331 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1334 inactive_tid_bitmap |= BIT(tid);
1336 spin_unlock_bh(&mvm->queue_info_lock);
1338 /* If all TIDs are active - finish check on this queue */
1339 if (!inactive_tid_bitmap)
1343 * If we are here - the queue hadn't been served recently and is
1347 sta_id = mvm->queue_info[i].ra_sta_id;
1348 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1351 * If the STA doesn't exist anymore, it isn't an error. It could
1352 * be that it was removed since getting the queues, and in this
1353 * case it should've inactivated its queues anyway.
1355 if (IS_ERR_OR_NULL(sta))
1358 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1360 spin_lock_bh(&mvmsta->lock);
1361 spin_lock(&mvm->queue_info_lock);
1362 iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1363 inactive_tid_bitmap);
1364 spin_unlock(&mvm->queue_info_lock);
1365 spin_unlock_bh(&mvmsta->lock);
1371 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
1372 struct ieee80211_vif *vif,
1373 const struct ieee80211_sta *sta,
1376 struct iwl_fw_dbg_trigger_tlv *trig;
1377 struct iwl_fw_dbg_trigger_ba *ba_trig;
1379 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
1382 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
1383 ba_trig = (void *)trig->data;
1384 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
1385 ieee80211_vif_to_wdev(vif), trig))
1388 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
1391 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1392 "Frame from %pM timed out, tid %d",
1396 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
1400 lockdep_assert_held(&mvm->mutex);
1402 /* Disable power save when reading GP2 */
1403 ps_disabled = mvm->ps_disabled;
1405 mvm->ps_disabled = true;
1406 iwl_mvm_power_update_device(mvm);
1409 *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
1410 *boottime = ktime_get_boot_ns();
1413 mvm->ps_disabled = ps_disabled;
1414 iwl_mvm_power_update_device(mvm);
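/*
 * Typical (illustrative) use of the helper above is to snapshot both clocks
 * at once so that a firmware GP2 timestamp can later be translated into a
 * host boottime (called with mvm->mutex held, per the lockdep assertion):
 *
 *	u32 gp2;
 *	u64 boottime_ns;
 *
 *	iwl_mvm_get_sync_time(mvm, &gp2, &boottime_ns);
 *	// gp2 and boottime_ns now describe (approximately) the same instant
 */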