2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pm_wakeirq.h>
34 #include "wl12xx_80211.h"
41 #include "vendor_cmd.h"
46 #define WL1271_BOOT_RETRIES 3
47 #define WL1271_SUSPEND_SLEEP 100
48 #define WL1271_WAKEUP_TIMEOUT 500
50 static char *fwlog_param;
51 static int fwlog_mem_blocks = -1;
52 static int bug_on_recovery = -1;
53 static int no_recovery = -1;
55 static void __wl1271_op_remove_interface(struct wl1271 *wl,
56 struct ieee80211_vif *vif,
57 bool reset_tx_queues);
58 static void wlcore_op_stop_locked(struct wl1271 *wl);
59 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that the STA peer has been authorized.
 * Bails out unless this vif is an associated station; the peer-state
 * command is sent at most once per association (STA_STATE_SENT bit).
 * NOTE(review): intermediate lines elided in this view — confirm against full file.
 */
61 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
65 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
68 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* test_and_set guarantees the state command is sent only once */
71 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
74 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
78 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change callback: cache the new DFS region on the
 * driver context and push the updated regulatory domain to the chip.
 */
82 static void wl1271_reg_notify(struct wiphy *wiphy,
83 struct regulatory_request *request)
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv;
88 /* copy the current dfs region */
90 wl->dfs_region = request->dfs_region;
92 wlcore_regdomain_config(wl);
/*
 * Enable/disable PS RX streaming for a vif via the ACX command and
 * mirror the result in WLVIF_FLAG_RX_STREAMING_STARTED.
 * Caller must hold wl->mutex.
 */
95 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
100 /* we should hold wl->mutex */
101 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* keep the vif flag in sync with the FW state */
106 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
108 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 * this function is being called when the rx_streaming interval
115 * has beed changed or rx_streaming should be disabled
117 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
120 int period = wl->conf.rx_streaming.interval;
122 /* don't reconfigure if rx_streaming is disabled */
123 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
126 /* reconfigure/disable according to new streaming_period */
128 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
129 (wl->conf.rx_streaming.always ||
130 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
131 ret = wl1271_set_rx_streaming(wl, wlvif, true);
/* interval now zero or conditions no longer met: turn streaming off */
133 ret = wl1271_set_rx_streaming(wl, wlvif, false);
134 /* don't cancel_work_sync since we might deadlock */
135 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif, then arm the
 * rx_streaming_timer to disable it again after the configured duration
 * of inactivity. Runs under wl->mutex with a runtime-PM reference held.
 */
141 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
144 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
145 rx_streaming_enable_work);
146 struct wl1271 *wl = wlvif->wl;
148 mutex_lock(&wl->mutex);
/* skip if already started, not associated, or streaming not warranted */
150 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
151 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
152 (!wl->conf.rx_streaming.always &&
153 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
156 if (!wl->conf.rx_streaming.interval)
/* wake the chip; drop the usage count again on failure */
159 ret = pm_runtime_get_sync(wl->dev);
161 pm_runtime_put_noidle(wl->dev);
165 ret = wl1271_set_rx_streaming(wl, wlvif, true);
169 /* stop it after some time of inactivity */
170 mod_timer(&wlvif->rx_streaming_timer,
171 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
174 pm_runtime_mark_last_busy(wl->dev);
175 pm_runtime_put_autosuspend(wl->dev);
177 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * rx_streaming_timer). Mirrors the enable work: wl->mutex plus a
 * runtime-PM reference around the ACX command.
 */
180 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
183 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
184 rx_streaming_disable_work);
185 struct wl1271 *wl = wlvif->wl;
187 mutex_lock(&wl->mutex);
189 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
192 ret = pm_runtime_get_sync(wl->dev);
194 pm_runtime_put_noidle(wl->dev);
198 ret = wl1271_set_rx_streaming(wl, wlvif, false);
203 pm_runtime_mark_last_busy(wl->dev);
204 pm_runtime_put_autosuspend(wl->dev);
206 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: defer the actual disabling to the
 * rx_streaming_disable_work item (cannot sleep in timer context).
 */
209 static void wl1271_rx_streaming_timer(struct timer_list *t)
211 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer)
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
/*
 * Re-arm the Tx-stuck watchdog: only meaningful while blocks are
 * allocated in the FW; restarts the delayed work with the configured
 * tx_watchdog_timeout.
 */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred rate-control update: for mesh vifs push the cached HT caps
 * to the FW; otherwise delegate to the chip-specific sta_rc_update op.
 * Skipped entirely unless the core is in the ON state.
 */
228 static void wlcore_rc_update_work(struct work_struct *work)
231 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
233 struct wl1271 *wl = wlvif->wl;
234 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
236 mutex_lock(&wl->mutex);
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 ret = pm_runtime_get_sync(wl->dev);
243 pm_runtime_put_noidle(wl->dev);
247 if (ieee80211_vif_is_mesh(vif)) {
248 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
249 true, wlvif->sta.hlid);
/* non-mesh path: chip-specific rate-control hook */
253 wlcore_hw_sta_rc_update(wl, wlvif);
257 pm_runtime_mark_last_busy(wl->dev);
258 pm_runtime_put_autosuspend(wl->dev);
260 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no FW Tx blocks were released for a full
 * tx_watchdog_timeout. Re-arms itself for the benign causes (ROC in
 * progress, active scan, AP buffering for PS stations); otherwise
 * declares Tx stuck and queues a full FW recovery.
 */
263 static void wl12xx_tx_watchdog_work(struct work_struct *work)
265 struct delayed_work *dwork;
268 dwork = to_delayed_work(work);
269 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
271 mutex_lock(&wl->mutex);
273 if (unlikely(wl->state != WLCORE_STATE_ON))
276 /* Tx went out in the meantime - everything is ok */
277 if (unlikely(wl->tx_allocated_blocks == 0))
281 * if a ROC is in progress, we might not have any Tx for a long
282 * time (e.g. pending Tx on the non-ROC channels)
284 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
285 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
286 wl->conf.tx.tx_watchdog_timeout);
287 wl12xx_rearm_tx_watchdog_locked(wl);
292 * if a scan is in progress, we might not have any Tx for a long
295 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_rearm_tx_watchdog_locked(wl);
303 * AP might cache a frame for a long time for a sleeping station,
304 * so rearm the timer if there's an AP interface with stations. If
305 * Tx is genuinely stuck we will most hopefully discover it when all
306 * stations are removed due to inactivity.
308 if (wl->active_sta_count) {
309 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
311 wl->conf.tx.tx_watchdog_timeout,
312 wl->active_sta_count);
313 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation left: treat the FW as wedged */
317 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
318 wl->conf.tx.tx_watchdog_timeout);
319 wl12xx_queue_recovery_work(wl);
322 mutex_unlock(&wl->mutex);
/*
 * Apply the module parameters (fwlog_param, bug_on_recovery,
 * no_recovery) on top of the platform/default configuration.
 * fwlog_param selects the FW logger mode/output; -1 means "leave the
 * compiled-in default" for the recovery knobs.
 */
325 static void wlcore_adjust_conf(struct wl1271 *wl)
329 if (!strcmp(fwlog_param, "continuous")) {
330 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
331 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
332 } else if (!strcmp(fwlog_param, "dbgpins")) {
333 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
334 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
335 } else if (!strcmp(fwlog_param, "disable")) {
336 wl->conf.fwlog.mem_blocks = 0;
337 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
339 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
343 if (bug_on_recovery != -1)
344 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
346 if (no_recovery != -1)
347 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link power-save regulation for AP mode, driven from the FW
 * status PS bitmap: end host-side PS buffering when the STA woke up or
 * drained its FW queue; start it when an asleep STA has accumulated
 * too many packets in FW memory (and other links could starve).
 */
350 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
351 struct wl12xx_vif *wlvif,
356 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
359 * Wake up from high level PS if the STA is asleep with too little
360 * packets in FW or if the STA is awake.
362 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_end(wl, wlvif, hlid);
366 * Start high-level PS if the STA is asleep with enough blocks in FW.
367 * Make an exception if this is the only connected link. In this
368 * case FW-memory congestion is less of a problem.
369 * Note that a single connected STA means 2*ap_count + 1 active links,
370 * since we must account for the global and broadcast AP links
371 * for each AP. The "fw_ps" check assures us the other link is a STA
372 * connected to the AP. Otherwise the FW would not set the PSM bit.
374 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
375 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
376 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached AP link PS bitmap from the FW status and run the
 * per-link PS regulation for every station connected to this AP vif.
 */
379 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
380 struct wl12xx_vif *wlvif,
381 struct wl_fw_status *status)
383 unsigned long cur_fw_ps_map;
386 cur_fw_ps_map = status->link_ps_bitmap;
387 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
388 wl1271_debug(DEBUG_PSM,
389 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
390 wl->ap_fw_ps_map, cur_fw_ps_map,
391 wl->ap_fw_ps_map ^ cur_fw_ps_map);
393 wl->ap_fw_ps_map = cur_fw_ps_map;
/* regulate PS per connected station link on this vif */
396 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
397 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
398 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status block: convert it to host layout,
 * reconcile per-queue and per-link freed-packet counters (8-bit FW
 * counters, hence the & 0xff wrap handling), update the global freed /
 * available block accounting, re-arm or cancel the Tx watchdog, run
 * AP PS regulation, and refresh the host<->chip time offset.
 */
401 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
403 struct wl12xx_vif *wlvif;
404 u32 old_tx_blk_count = wl->tx_blocks_available;
405 int avail, freed_blocks;
408 struct wl1271_link *lnk;
410 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
412 wl->fw_status_len, false);
/* translate the raw (chip-endian) status into wl->fw_status */
416 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
418 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
419 "drv_rx_counter = %d, tx_results_counter = %d)",
421 status->fw_rx_counter,
422 status->drv_rx_counter,
423 status->tx_results_counter);
425 for (i = 0; i < NUM_TX_QUEUES; i++) {
426 /* prevent wrap-around in freed-packets counter */
427 wl->tx_allocated_pkts[i] -=
428 (status->counters.tx_released_pkts[i] -
429 wl->tx_pkts_freed[i]) & 0xff;
431 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
435 for_each_set_bit(i, wl->links_map, wl->num_links) {
439 /* prevent wrap-around in freed-packets counter */
440 diff = (status->counters.tx_lnk_free_pkts[i] -
441 lnk->prev_freed_pkts) & 0xff;
446 lnk->allocated_pkts -= diff;
447 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
449 /* accumulate the prev_freed_pkts counter */
450 lnk->total_freed_pkts += diff;
453 /* prevent wrap-around in total blocks counter */
454 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
455 freed_blocks = status->total_released_blks -
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 status->total_released_blks;
461 wl->tx_blocks_freed = status->total_released_blks;
463 wl->tx_allocated_blocks -= freed_blocks;
466 * If the FW freed some blocks:
467 * If we still have allocated blocks - re-arm the timer, Tx is
468 * not stuck. Otherwise, cancel the timer (no Tx currently).
471 if (wl->tx_allocated_blocks)
472 wl12xx_rearm_tx_watchdog_locked(wl);
474 cancel_delayed_work(&wl->tx_watchdog_work);
477 avail = status->tx_total - wl->tx_allocated_blocks;
480 * The FW might change the total number of TX memblocks before
481 * we get a notification about blocks being released. Thus, the
482 * available blocks calculation might yield a temporary result
483 * which is lower than the actual available blocks. Keeping in
484 * mind that only blocks that were allocated can be moved from
485 * TX to RX, tx_blocks_available should never decrease here.
487 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
490 /* if more blocks are available now, tx work can be scheduled */
491 if (wl->tx_blocks_available > old_tx_blk_count)
492 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
494 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status);
499 /* update the host-chipset time offset */
500 wl->time_offset = (ktime_get_boot_ns() >> 10) -
501 (s64)(status->fw_localtime);
502 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: received frames to the
 * RX path, completed transmissions to the TX-status path.
 */
508 static void wl1271_flush_deferred_work(struct wl1271 *wl)
512 /* Pass all received frames to the network stack */
513 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
514 ieee80211_rx_ni(wl->hw, skb);
516 /* Return sent skbs to the network stack */
517 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
518 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes deferred RX/TX skbs; loops until the deferred
 * RX queue is observed empty.
 */
521 static void wl1271_netstack_work(struct work_struct *work)
524 container_of(work, struct wl1271, netstack_work);
527 wl1271_flush_deferred_work(wl);
528 } while (skb_queue_len(&wl->deferred_rx_queue));
531 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded-IRQ body (wl->mutex held by caller): loop reading the FW
 * status and servicing interrupt causes — watchdogs (HW/SW) trigger
 * recovery and abort the loop; DATA drives RX, inline TX (to avoid
 * starvation) and delayed TX completion; EVENT_A/B dispatch mailbox
 * events. Bounded by WL1271_IRQ_MAX_LOOPS, and restricted to a single
 * iteration for edge-triggered interrupt lines.
 */
533 static int wlcore_irq_locked(struct wl1271 *wl)
537 int loopcount = WL1271_IRQ_MAX_LOOPS;
539 unsigned int defer_count;
543 * In case edge triggered interrupt must be used, we cannot iterate
544 * more than once without introducing race conditions with the hardirq.
546 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
549 wl1271_debug(DEBUG_IRQ, "IRQ work");
551 if (unlikely(wl->state != WLCORE_STATE_ON))
554 ret = pm_runtime_get_sync(wl->dev);
556 pm_runtime_put_noidle(wl->dev);
560 while (!done && loopcount--) {
562 * In order to avoid a race with the hardirq, clear the flag
563 * before acknowledging the chip.
565 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
566 smp_mb__after_atomic();
568 ret = wlcore_fw_status(wl, wl->fw_status);
572 wlcore_hw_tx_immediate_compl(wl);
574 intr = wl->fw_status->intr;
575 intr &= WLCORE_ALL_INTR_MASK;
581 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
582 wl1271_error("HW watchdog interrupt received! starting recovery.");
583 wl->watchdog_recovery = true;
586 /* restarting the chip. ignore any other interrupt. */
590 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
591 wl1271_error("SW watchdog interrupt received! "
592 "starting recovery.");
593 wl->watchdog_recovery = true;
596 /* restarting the chip. ignore any other interrupt. */
600 if (likely(intr & WL1271_ACX_INTR_DATA)) {
601 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
603 ret = wlcore_rx(wl, wl->fw_status);
607 /* Check if any tx blocks were freed */
608 spin_lock_irqsave(&wl->wl_lock, flags);
609 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
610 wl1271_tx_total_queue_count(wl) > 0) {
611 spin_unlock_irqrestore(&wl->wl_lock, flags);
613 * In order to avoid starvation of the TX path,
614 * call the work function directly.
616 ret = wlcore_tx_work_locked(wl);
620 spin_unlock_irqrestore(&wl->wl_lock, flags);
623 /* check for tx results */
624 ret = wlcore_hw_tx_delayed_compl(wl);
628 /* Make sure the deferred queues don't get too long */
629 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
630 skb_queue_len(&wl->deferred_rx_queue);
631 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
632 wl1271_flush_deferred_work(wl);
635 if (intr & WL1271_ACX_INTR_EVENT_A) {
636 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
637 ret = wl1271_event_handle(wl, 0);
642 if (intr & WL1271_ACX_INTR_EVENT_B) {
643 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
644 ret = wl1271_event_handle(wl, 1);
649 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
650 wl1271_debug(DEBUG_IRQ,
651 "WL1271_ACX_INTR_INIT_COMPLETE");
653 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
654 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
657 pm_runtime_mark_last_busy(wl->dev);
658 pm_runtime_put_autosuspend(wl->dev);
/*
 * Threaded interrupt handler: completes a pending ELP wakeup if one is
 * waiting, defers handling while suspended (marks the work pending,
 * disables the IRQ and signals a wakeup event), otherwise runs the
 * locked IRQ body under wl->mutex, queueing recovery on failure and
 * TX work if packets remain queued.
 */
664 static irqreturn_t wlcore_irq(int irq, void *cookie)
668 struct wl1271 *wl = cookie;
670 /* complete the ELP completion */
671 spin_lock_irqsave(&wl->wl_lock, flags);
672 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
674 complete(wl->elp_compl);
675 wl->elp_compl = NULL;
678 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
679 /* don't enqueue a work right now. mark it as pending */
680 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
681 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
682 disable_irq_nosync(wl->irq);
683 pm_wakeup_event(wl->dev, 0);
684 spin_unlock_irqrestore(&wl->wl_lock, flags);
687 spin_unlock_irqrestore(&wl->wl_lock, flags);
689 /* TX might be handled here, avoid redundant work */
690 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
691 cancel_work_sync(&wl->tx_work);
693 mutex_lock(&wl->mutex);
695 ret = wlcore_irq_locked(wl);
697 wl12xx_queue_recovery_work(wl);
699 spin_lock_irqsave(&wl->wl_lock, flags);
700 /* In case TX was not handled here, queue TX work */
701 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
702 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
703 wl1271_tx_total_queue_count(wl) > 0)
704 ieee80211_queue_work(wl->hw, &wl->tx_work);
705 spin_unlock_irqrestore(&wl->wl_lock, flags);
707 mutex_unlock(&wl->mutex);
/* Accumulator for the active-interface iteration below. */
712 struct vif_counter_data {
715 struct ieee80211_vif *cur_vif;
716 bool cur_vif_running;
/*
 * Iterator callback: counts active vifs and notes whether the vif the
 * caller is interested in (cur_vif) is among them.
 */
719 static void wl12xx_vif_count_iter(void *data, u8 *mac,
720 struct ieee80211_vif *vif)
722 struct vif_counter_data *counter = data;
725 if (counter->cur_vif == vif)
726 counter->cur_vif_running = true;
729 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count active mac80211 interfaces into *data, tracking whether
 * cur_vif is currently running.
 */
730 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
731 struct ieee80211_vif *cur_vif,
732 struct vif_counter_data *data)
734 memset(data, 0, sizeof(*data));
735 data->cur_vif = cur_vif;
737 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
738 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image (PLT, multi-role, or
 * single-role based on the cached vif count), skipping the request if
 * the correct type is already loaded. The image is copied into a
 * vmalloc'ed buffer (wl->fw) and the struct firmware is released.
 */
741 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
743 const struct firmware *fw;
745 enum wl12xx_fw_type fw_type;
749 fw_type = WL12XX_FW_TYPE_PLT;
750 fw_name = wl->plt_fw_name;
753 * we can't call wl12xx_get_vif_count() here because
754 * wl->mutex is taken, so use the cached last_vif_count value
756 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
757 fw_type = WL12XX_FW_TYPE_MULTI;
758 fw_name = wl->mr_fw_name;
760 fw_type = WL12XX_FW_TYPE_NORMAL;
761 fw_name = wl->sr_fw_name;
/* already have the right image loaded: nothing to do */
765 if (wl->fw_type == fw_type)
768 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
770 ret = request_firmware(&fw, fw_name, wl->dev);
773 wl1271_error("could not get firmware %s: %d", fw_name, ret);
778 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the type until the copy below succeeds */
785 wl->fw_type = WL12XX_FW_TYPE_NONE;
786 wl->fw_len = fw->size;
787 wl->fw = vmalloc(wl->fw_len);
790 wl1271_error("could not allocate memory for the firmware");
795 memcpy(wl->fw, fw->data, wl->fw_len);
797 wl->fw_type = fw_type;
799 release_firmware(fw);
/*
 * Queue the FW recovery work, guarding against re-entry by moving the
 * core state to RESTARTING first. Warns if the recovery was not an
 * intended one (i.e. a genuine crash).
 */
804 void wl12xx_queue_recovery_work(struct wl1271 *wl)
806 /* Avoid a recursive recovery */
807 if (wl->state == WLCORE_STATE_ON) {
808 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
811 wl->state = WLCORE_STATE_RESTARTING;
812 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
813 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append a FW log memory block to the host-side log buffer (exposed
 * through sysfs), clamped so the buffer never exceeds PAGE_SIZE.
 * Returns the number of bytes actually copied.
 */
817 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
821 /* Make sure we have enough room */
822 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
824 /* Fill the FW log file, consumed by the sysfs fwlog entry */
825 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
826 wl->fwlog_size += len;
/*
 * Dump the FW panic log after a crash: wake the chip, stop the logger
 * (unless the FW is wedged or logging to debug pins), then walk the
 * FW logger's linked list of memory blocks until end-of-log.
 */
831 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
836 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
839 wl1271_info("Reading FW panic log");
842 * Make sure the chip is awake and the logger isn't active.
843 * Do not send a stop fwlog command if the fw is hanged or if
844 * dbgpins are used (due to some fw bug).
846 error = pm_runtime_get_sync(wl->dev);
848 pm_runtime_put_noidle(wl->dev);
851 if (!wl->watchdog_recovery &&
852 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
853 wl12xx_cmd_stop_fwlog(wl);
855 /* Traverse the memory blocks linked list */
857 end_of_log = wlcore_event_fw_logger(wl);
858 if (end_of_log == 0) {
860 end_of_log = wlcore_event_fw_logger(wl);
862 } while (end_of_log != 0);
/*
 * Preserve the per-link freed-packets counter in the station's private
 * data across an interface removal/recovery, padding the sequence
 * number on recovery (larger padding for GEM ciphers) to cover frames
 * transmitted but not yet reported in the FW status.
 */
865 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
866 u8 hlid, struct ieee80211_sta *sta)
868 struct wl1271_station *wl_sta;
869 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
871 wl_sta = (void *)sta->drv_priv;
872 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
875 * increment the initial seq number on recovery to account for
876 * transmitted packets that we haven't yet got in the FW status
878 if (wlvif->encryption_type == KEY_GEM)
879 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
881 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
882 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look up the
 * station by MAC on this vif (RCU-protected lookup via mac80211) and
 * save its freed-packets counter. Bails on an invalid hlid/address.
 */
885 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
886 struct wl12xx_vif *wlvif,
887 u8 hlid, const u8 *addr)
889 struct ieee80211_sta *sta;
890 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
892 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
893 is_zero_ether_addr(addr)))
897 sta = ieee80211_find_sta(vif, addr);
899 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log recovery diagnostics: FW version, program counter at crash time
 * and the interrupt status register, read via a temporary switch to
 * the BOOT partition (restored to WORK before returning).
 */
903 static void wlcore_print_recovery(struct wl1271 *wl)
909 wl1271_info("Hardware recovery in progress. FW ver: %s",
910 wl->chip.fw_ver_str);
912 /* change partitions momentarily so we can read the FW pc */
913 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
917 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
921 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
925 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
926 pc, hint_sts, ++wl->recovery_count);
928 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Full FW recovery: dump the panic log and crash diagnostics (unless
 * the recovery was intentional), honor the bug_on_recovery /
 * no_recovery module knobs, stop TX queues, tear down every vif
 * (saving STA sequence counters first so reassociation keeps working),
 * stop the chip and ask mac80211 to restart the hardware.
 */
932 static void wl1271_recovery_work(struct work_struct *work)
935 container_of(work, struct wl1271, recovery_work);
936 struct wl12xx_vif *wlvif;
937 struct ieee80211_vif *vif;
940 mutex_lock(&wl->mutex);
942 if (wl->state == WLCORE_STATE_OFF || wl->plt)
945 error = pm_runtime_get_sync(wl->dev);
947 wl1271_warning("Enable for recovery failed");
948 pm_runtime_put_noidle(wl->dev);
950 wlcore_disable_interrupts_nosync(wl);
952 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
953 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
954 wl12xx_read_fwlog_panic(wl);
955 wlcore_print_recovery(wl);
/* debugging aid: crash the kernel on unexpected recoveries if asked */
958 BUG_ON(wl->conf.recovery.bug_on_recovery &&
959 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
961 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
963 if (wl->conf.recovery.no_recovery) {
964 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
968 /* Prevent spurious TX during FW restart */
969 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
971 /* reboot the chipset */
972 while (!list_empty(&wl->wlvif_list)) {
973 wlvif = list_first_entry(&wl->wlvif_list,
974 struct wl12xx_vif, list);
975 vif = wl12xx_wlvif_to_vif(wlvif);
/* keep seq numbers monotonic across recovery for associated STAs */
977 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
978 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
979 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
980 vif->bss_conf.bssid);
983 __wl1271_op_remove_interface(wl, vif, false);
986 wlcore_op_stop_locked(wl);
987 pm_runtime_mark_last_busy(wl->dev);
988 pm_runtime_put_autosuspend(wl->dev);
990 ieee80211_restart_hw(wl->hw);
993 * Its safe to enable TX now - the queues are stopped after a request
996 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
999 wl->watchdog_recovery = false;
1000 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1001 mutex_unlock(&wl->mutex);
/* Wake the chip out of ELP by writing to the ELP control register. */
1004 static int wlcore_fw_wakeup(struct wl1271 *wl)
1006 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and TX-result buffers (raw_fw_status,
 * fw_status, tx_res_if); the trailing kfree calls are the error-unwind
 * path freeing earlier allocations.
 */
1009 static int wl1271_setup(struct wl1271 *wl)
1011 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1012 if (!wl->raw_fw_status)
1015 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1019 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1025 kfree(wl->fw_status);
1026 kfree(wl->raw_fw_status);
/*
 * Power-on sequence: settle delay, power on, post-power-on delay, I/O
 * reset, switch to the BOOT partition and wake the FW out of ELP.
 * Powers back off on failure.
 */
1031 static int wl12xx_set_power_on(struct wl1271 *wl)
1034 msleep(WL1271_PRE_POWER_ON_SLEEP);
1035 ret = wl1271_power_on(wl);
1038 msleep(WL1271_POWER_ON_SLEEP);
1039 wl1271_io_reset(wl);
1042 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1046 /* ELP module wake up */
1047 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1055 wl1271_power_off(wl);
/*
 * Bring the chip up for normal or PLT operation: power on, negotiate
 * the bus block size (dropping the TX blocksize-align quirk when the
 * bus can't do it), allocate driver buffers and fetch the firmware.
 */
1059 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1063 ret = wl12xx_set_power_on(wl);
1068 * For wl127x based devices we could use the default block
1069 * size (512 bytes), but due to a bug in the sdio driver, we
1070 * need to set it explicitly after the chip is powered on. To
1071 * simplify the code and since the performance impact is
1072 * negligible, we use the same block size for all different
1075 * Check if the bus supports blocksize alignment and, if it
1076 * doesn't, make sure we don't have the quirk.
1078 if (!wl1271_set_block_size(wl))
1079 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1081 /* TODO: make sure the lower driver has set things up correctly */
1083 ret = wl1271_setup(wl);
1087 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line testing) mode: only valid from the OFF
 * state. Wakes the chip (with up to WL1271_BOOT_RETRIES attempts),
 * runs the chip-specific plt_init unless only CHIP_AWAKE was
 * requested, and publishes the hw/fw versions through wiphy.
 */
1095 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1097 int retries = WL1271_BOOT_RETRIES;
1098 struct wiphy *wiphy = wl->hw->wiphy;
1100 static const char* const PLT_MODE[] = {
1109 mutex_lock(&wl->mutex);
1111 wl1271_notice("power up");
1113 if (wl->state != WLCORE_STATE_OFF) {
1114 wl1271_error("cannot go into PLT state because not "
1115 "in off state: %d", wl->state);
1120 /* Indicate to lower levels that we are now in PLT mode */
1122 wl->plt_mode = plt_mode;
1126 ret = wl12xx_chip_wakeup(wl, true);
1130 if (plt_mode != PLT_CHIP_AWAKE) {
1131 ret = wl->ops->plt_init(wl);
1136 wl->state = WLCORE_STATE_ON;
1137 wl1271_notice("firmware booted in PLT mode %s (%s)",
1139 wl->chip.fw_ver_str);
1141 /* update hw/fw version info in wiphy struct */
1142 wiphy->hw_version = wl->chip.id;
1143 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1144 sizeof(wiphy->fw_version));
/* retry/error path: power down and clear PLT mode after all retries */
1149 wl1271_power_off(wl);
1153 wl->plt_mode = PLT_OFF;
1155 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1156 WL1271_BOOT_RETRIES);
1158 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the
 * handler cannot run on a half-torn-down device), flush and cancel all
 * deferred work, then power off and return to the OFF state.
 */
1163 int wl1271_plt_stop(struct wl1271 *wl)
1167 wl1271_notice("power down");
1170 * Interrupts must be disabled before setting the state to OFF.
1171 * Otherwise, the interrupt handler might be called and exit without
1172 * reading the interrupt status.
1174 wlcore_disable_interrupts(wl);
1175 mutex_lock(&wl->mutex);
1177 mutex_unlock(&wl->mutex);
1180 * This will not necessarily enable interrupts as interrupts
1181 * may have been disabled when op_stop was called. It will,
1182 * however, balance the above call to disable_interrupts().
1184 wlcore_enable_interrupts(wl);
1186 wl1271_error("cannot power down because not in PLT "
1187 "state: %d", wl->state);
1192 mutex_unlock(&wl->mutex);
1194 wl1271_flush_deferred_work(wl);
1195 cancel_work_sync(&wl->netstack_work);
1196 cancel_work_sync(&wl->recovery_work);
1197 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1199 mutex_lock(&wl->mutex);
1200 wl1271_power_off(wl);
1202 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1203 wl->state = WLCORE_STATE_OFF;
1205 wl->plt_mode = PLT_OFF;
1207 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point: map the skb to a vif/queue/link, drop it if
 * the link is invalid or the queue is hard-stopped (watermark stops
 * are soft and still pass), enqueue it on the per-link queue, apply
 * the high-watermark backpressure, and kick the TX work unless the FW
 * is busy or a TX pass is already pending.
 */
1213 static void wl1271_op_tx(struct ieee80211_hw *hw,
1214 struct ieee80211_tx_control *control,
1215 struct sk_buff *skb)
1217 struct wl1271 *wl = hw->priv;
1218 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1219 struct ieee80211_vif *vif = info->control.vif;
1220 struct wl12xx_vif *wlvif = NULL;
1221 unsigned long flags;
1226 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1227 ieee80211_free_txskb(hw, skb);
1231 wlvif = wl12xx_vif_to_data(vif);
1232 mapping = skb_get_queue_mapping(skb);
1233 q = wl1271_tx_get_queue(mapping);
1235 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1237 spin_lock_irqsave(&wl->wl_lock, flags);
1240 * drop the packet if the link is invalid or the queue is stopped
1241 * for any reason but watermark. Watermark is a "soft"-stop so we
1242 * allow these packets through.
1244 if (hlid == WL12XX_INVALID_LINK_ID ||
1245 (!test_bit(hlid, wlvif->links_map)) ||
1246 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1247 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1248 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1249 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1250 ieee80211_free_txskb(hw, skb);
1254 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1256 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1258 wl->tx_queue_count[q]++;
1259 wlvif->tx_queue_count[q]++;
1262 * The workqueue is slow to process the tx_queue and we need stop
1263 * the queue here, otherwise the queue will get too long.
1265 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1266 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1267 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1268 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1269 wlcore_stop_queue_locked(wl, wlvif, q,
1270 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1274 * The chip specific setup must run before the first TX packet -
1275 * before that, the tx_work will not be initialized!
1278 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1279 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1280 ieee80211_queue_work(wl->hw, &wl->tx_work);
1283 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it runs
 * low on RX memory blocks. Idempotent while one is already pending;
 * transmits inline when the FW TX path is not busy, otherwise leaves
 * it to the threaded IRQ handler to schedule.
 */
1286 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1288 unsigned long flags;
1291 /* no need to queue a new dummy packet if one is already pending */
1292 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1295 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1297 spin_lock_irqsave(&wl->wl_lock, flags);
1298 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1299 wl->tx_queue_count[q]++;
1300 spin_unlock_irqrestore(&wl->wl_lock, flags);
1302 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1303 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1304 return wlcore_tx_work_locked(wl);
1307 * If the FW TX is busy, TX work will be scheduled by the threaded
1308 * interrupt handler function
1314 * The size of the dummy packet should be at least 1400 bytes. However, in
1315 * order to minimize the number of bus transactions, aligning it to 512 bytes
1316 * boundaries could be beneficial, performance wise
1318 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the zero-filled NULL-function data frame (ToDS) used as the
 * dummy packet: headroom reserved for the HW TX descriptor, management
 * TID, queue 0 mapping and cleared CB so TX info starts from scratch.
 */
1320 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1322 struct sk_buff *skb;
1323 struct ieee80211_hdr_3addr *hdr;
1324 unsigned int dummy_packet_size;
1326 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1327 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1329 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1331 wl1271_warning("Failed to allocate a dummy packet skb");
1335 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1337 hdr = skb_put_zero(skb, sizeof(*hdr));
1338 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1339 IEEE80211_STYPE_NULLFUNC |
1340 IEEE80211_FCTL_TODS);
1342 skb_put_zero(skb, dummy_packet_size);
1344 /* Dummy packets require the TID to be management */
1345 skb->priority = WL1271_TID_MGMT;
1347 /* Initialize all fields that might be used */
1348 skb_set_queue_mapping(skb, 0);
1349 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a cfg80211 WoWLAN packet pattern against FW RX-filter
 * limits: walk the mask bitmap to count contiguous match segments
 * ("fields"), forcing a field break at the Ethernet/IP boundary, and
 * reject patterns exceeding the FW's field count or buffer size caps.
 */
1356 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1358 int num_fields = 0, in_field = 0, fields_size = 0;
1359 int i, pattern_len = 0;
1362 wl1271_warning("No mask in WoWLAN pattern");
1367 * The pattern is broken up into segments of bytes at different offsets
1368 * that need to be checked by the FW filter. Each segment is called
1369 * a field in the FW API. We verify that the total number of fields
1370 * required for this pattern won't exceed FW limits (8)
1371 * as well as the total fields buffer won't exceed the FW limit.
1372 * Note that if there's a pattern which crosses Ethernet/IP header
1373 * boundary a new field is required.
1375 for (i = 0; i < p->pattern_len; i++) {
1376 if (test_bit(i, (unsigned long *)p->mask)) {
1381 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1383 fields_size += pattern_len +
1384 RX_FILTER_FIELD_OVERHEAD;
1392 fields_size += pattern_len +
1393 RX_FILTER_FIELD_OVERHEAD;
/* close the final in-progress field, if any */
1400 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1404 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1405 wl1271_warning("RX Filter too complex. Too many segments");
1409 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1410 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees with wl1271_rx_filter_free(). */
1417 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1419 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including every per-field pattern buffer. */
1422 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1429 for (i = 0; i < filter->num_fields; i++)
1430 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter.
 * Fails when the filter already holds WL1271_RX_FILTER_MAX_FIELDS fields
 * or when the pattern buffer allocation fails (return lines not visible
 * in this listing).
 */
1435 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1436 u16 offset, u8 flags,
1437 const u8 *pattern, u8 len)
1439 struct wl12xx_rx_filter_field *field;
1441 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1442 wl1271_warning("Max fields per RX filter. can't alloc another");
1446 field = &filter->fields[filter->num_fields];
1448 field->pattern = kzalloc(len, GFP_KERNEL);
1449 if (!field->pattern) {
1450 wl1271_warning("Failed to allocate RX filter pattern");
1454 filter->num_fields++;
/* Offset is stored little-endian, as consumed by the FW */
1456 field->offset = cpu_to_le16(offset);
1457 field->flags = flags;
1459 memcpy(field->pattern, pattern, len);
/*
 * Compute the flattened (wire-format) size of all fields: each field's
 * struct minus the pattern pointer, plus the actual pattern length.
 */
1464 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1466 int i, fields_size = 0;
1468 for (i = 0; i < filter->num_fields; i++)
1469 fields_size += filter->fields[i].len +
1470 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter fields into a contiguous buffer in the FW wire
 * format: fixed header per field followed inline by the pattern bytes
 * (the in-memory pattern pointer is replaced by the pattern itself).
 * Caller must size buf via wl1271_rx_filter_get_fields_size().
 */
1476 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1480 struct wl12xx_rx_filter_field *field;
1482 for (i = 0; i < filter->num_fields; i++) {
1483 field = (struct wl12xx_rx_filter_field *)buf;
1485 field->offset = filter->fields[i].offset;
1486 field->flags = filter->fields[i].flags;
1487 field->len = filter->fields[i].len;
/* Pattern bytes are written in place of the pointer member */
1489 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1490 buf += sizeof(struct wl12xx_rx_filter_field) -
1491 sizeof(u8 *) + field->len;
1496 * Allocates an RX filter returned through f
1497 * which needs to be freed using rx_filter_free()
1500 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1501 struct wl12xx_rx_filter **f)
1504 struct wl12xx_rx_filter *filter;
1508 filter = wl1271_rx_filter_alloc();
1510 wl1271_warning("Failed to alloc rx filter");
/* Walk the mask: each contiguous run of set bits becomes one field */
1516 while (i < p->pattern_len) {
1517 if (!test_bit(i, (unsigned long *)p->mask)) {
/* Find the end of the current run of masked bytes */
1522 for (j = i; j < p->pattern_len; j++) {
1523 if (!test_bit(j, (unsigned long *)p->mask))
/* A run crossing the Ethernet header boundary is split in two */
1526 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1527 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* Field offsets are relative to the Ethernet or IP header start */
1531 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1533 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1535 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1536 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1541 ret = wl1271_rx_filter_alloc_field(filter,
1544 &p->pattern[i], len);
/* A matching packet wakes the host rather than being dropped */
1551 filter->action = FILTER_SIGNAL;
/* Error path: release the partially built filter */
1557 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the WoWLAN configuration.
 * With no patterns (or wow->any), filtering is disabled and all filters
 * cleared; otherwise each pattern is validated, converted and enabled,
 * and the default action is set to drop non-matching frames.
 */
1563 static int wl1271_configure_wowlan(struct wl1271 *wl,
1564 struct cfg80211_wowlan *wow)
1568 if (!wow || wow->any || !wow->n_patterns) {
1569 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1574 ret = wl1271_rx_filter_clear_all(wl);
1581 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1584 /* Validate all incoming patterns before clearing current FW state */
1585 for (i = 0; i < wow->n_patterns; i++) {
1586 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1588 wl1271_warning("Bad wowlan pattern %d", i);
/* Reset FW filter state before installing the new set */
1593 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1597 ret = wl1271_rx_filter_clear_all(wl);
1601 /* Translate WoWLAN patterns into filters */
1602 for (i = 0; i < wow->n_patterns; i++) {
1603 struct cfg80211_pkt_pattern *p;
1604 struct wl12xx_rx_filter *filter = NULL;
1606 p = &wow->patterns[i];
1608 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1610 wl1271_warning("Failed to create an RX filter from "
1611 "wowlan pattern %d", i);
1615 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* The FW keeps its own copy, so the local filter is always freed */
1617 wl1271_rx_filter_free(filter);
/* Default: drop everything that did not match an enabled filter */
1622 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: install WoWLAN filters and,
 * if the suspend wake-up parameters differ from the runtime ones, switch
 * the FW wake-up conditions. No-op for unassociated STAs.
 */
1628 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1629 struct wl12xx_vif *wlvif,
1630 struct cfg80211_wowlan *wow)
1634 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1637 ret = wl1271_configure_wowlan(wl, wow)
/* Skip the ACX if suspend parameters equal the runtime parameters */
1641 if ((wl->conf.conn.suspend_wake_up_event ==
1642 wl->conf.conn.wake_up_event) &&
1643 (wl->conf.conn.suspend_listen_interval ==
1644 wl->conf.conn.listen_interval))
1647 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1648 wl->conf.conn.suspend_wake_up_event,
1649 wl->conf.conn.suspend_listen_interval);
1652 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Suspend-time configuration for an AP vif: enable beacon filtering and
 * install WoWLAN filters. No-op if the AP has not been started.
 */
1658 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1659 struct wl12xx_vif *wlvif,
1660 struct cfg80211_wowlan *wow)
1664 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1667 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1671 ret = wl1271_configure_wowlan(wl, wow);
/* Dispatch per-vif suspend configuration by BSS type (STA vs. AP). */
1680 static int wl1271_configure_suspend(struct wl1271 *wl,
1681 struct wl12xx_vif *wlvif,
1682 struct cfg80211_wowlan *wow)
1684 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1685 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1686 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1687 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration on resume: remove WoWLAN filters,
 * restore the runtime wake-up conditions (STA) and disable beacon
 * filtering (AP). Errors are logged but not propagated.
 */
1691 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1694 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1695 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1697 if ((!is_ap) && (!is_sta))
/* Only touch vifs that were actually configured at suspend time */
1700 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1701 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow disables filtering and clears all RX filters */
1704 wl1271_configure_wowlan(wl, NULL);
1707 if ((wl->conf.conn.suspend_wake_up_event ==
1708 wl->conf.conn.wake_up_event) &&
1709 (wl->conf.conn.suspend_listen_interval ==
1710 wl->conf.conn.listen_interval))
1713 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1714 wl->conf.conn.wake_up_event,
1715 wl->conf.conn.listen_interval);
1718 wl1271_error("resume: wake up conditions failed: %d",
1722 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback. Flushes TX, configures each vif for WoWLAN,
 * quiesces FW notifications, flushes/cancels pending work, marks the
 * device suspended (so the threaded IRQ defers), and finally forces
 * runtime suspend.
 */
1726 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1727 struct cfg80211_wowlan *wow)
1729 struct wl1271 *wl = hw->priv;
1730 struct wl12xx_vif *wlvif;
1731 unsigned long flags;
1734 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1737 /* we want to perform the recovery before suspending */
1738 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1739 wl1271_warning("postponing suspend to perform recovery");
1743 wl1271_tx_flush(wl);
1745 mutex_lock(&wl->mutex);
/* Keep the chip awake for the whole suspend configuration sequence */
1747 ret = pm_runtime_get_sync(wl->dev);
1749 pm_runtime_put_noidle(wl->dev);
1750 mutex_unlock(&wl->mutex);
1754 wl->wow_enabled = true;
1755 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs carry no data traffic; nothing to configure */
1756 if (wlcore_is_p2p_mgmt(wlvif))
1759 ret = wl1271_configure_suspend(wl, wlvif, wow);
1761 mutex_unlock(&wl->mutex);
1762 wl1271_warning("couldn't prepare device to suspend");
1767 /* disable fast link flow control notifications from FW */
1768 ret = wlcore_hw_interrupt_notify(wl, false);
1772 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1773 ret = wlcore_hw_rx_ba_filter(wl,
1774 !!wl->conf.conn.suspend_rx_ba_activity);
1779 pm_runtime_put_noidle(wl->dev);
1780 mutex_unlock(&wl->mutex);
1783 wl1271_warning("couldn't prepare device to suspend");
1787 /* flush any remaining work */
1788 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1790 flush_work(&wl->tx_work);
1793 * Cancel the watchdog even if above tx_flush failed. We will detect
1794 * it on resume anyway.
1796 cancel_delayed_work(&wl->tx_watchdog_work);
1799 * set suspended flag to avoid triggering a new threaded_irq
1802 spin_lock_irqsave(&wl->wl_lock, flags);
1803 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1804 spin_unlock_irqrestore(&wl->wl_lock, flags);
1806 return pm_runtime_force_suspend(wl->dev);
/*
 * mac80211 resume callback. Forces runtime resume, replays any IRQ work
 * postponed while suspended (unless a recovery is pending, in which case
 * the recovery is queued instead), restores per-vif runtime settings,
 * re-enables FW notifications and re-arms the TX watchdog.
 */
1809 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1811 struct wl1271 *wl = hw->priv;
1812 struct wl12xx_vif *wlvif;
1813 unsigned long flags;
1814 bool run_irq_work = false, pending_recovery;
1817 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1819 WARN_ON(!wl->wow_enabled);
1821 ret = pm_runtime_force_resume(wl->dev);
1823 wl1271_error("ELP wakeup failure!");
1828 * re-enable irq_work enqueuing, and call irq_work directly if
1829 * there is a pending work.
1831 spin_lock_irqsave(&wl->wl_lock, flags);
1832 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1833 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1834 run_irq_work = true;
1835 spin_unlock_irqrestore(&wl->wl_lock, flags);
1837 mutex_lock(&wl->mutex);
1839 /* test the recovery flag before calling any SDIO functions */
1840 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1844 wl1271_debug(DEBUG_MAC80211,
1845 "run postponed irq_work directly");
1847 /* don't talk to the HW if recovery is pending */
1848 if (!pending_recovery) {
1849 ret = wlcore_irq_locked(wl);
1851 wl12xx_queue_recovery_work(wl);
/* Re-balance interrupts disabled by the suspend-time IRQ path */
1854 wlcore_enable_interrupts(wl);
1857 if (pending_recovery) {
1858 wl1271_warning("queuing forgotten recovery on resume");
1859 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1863 ret = pm_runtime_get_sync(wl->dev);
1865 pm_runtime_put_noidle(wl->dev);
1869 wl12xx_for_each_wlvif(wl, wlvif) {
1870 if (wlcore_is_p2p_mgmt(wlvif))
1873 wl1271_configure_resume(wl, wlvif);
/* Re-enable fast link flow control notifications from FW */
1876 ret = wlcore_hw_interrupt_notify(wl, true);
1880 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1881 ret = wlcore_hw_rx_ba_filter(wl, false);
1886 pm_runtime_mark_last_busy(wl->dev);
1887 pm_runtime_put_autosuspend(wl->dev);
1890 wl->wow_enabled = false;
1893 * Set a flag to re-init the watchdog on the first Tx after resume.
1894 * That way we avoid possible conditions where Tx-complete interrupts
1895 * fail to arrive and we perform a spurious recovery.
1897 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1898 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback. Intentionally does no hardware work: boot is
 * deferred to add_interface time because the FW needs the final MAC
 * address before it is downloaded (see the comment below).
 */
1903 static int wl1271_op_start(struct ieee80211_hw *hw)
1905 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1908 * We have to delay the booting of the hardware because
1909 * we need to know the local MAC address before downloading and
1910 * initializing the firmware. The MAC address cannot be changed
1911 * after boot, and without the proper MAC address, the firmware
1912 * will not function properly.
1914 * The MAC address is first known when the corresponding interface
1915 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held on entry and exit. Transitions the
 * state to OFF, drains interrupts and work items (dropping the mutex
 * around the synchronous cancels to avoid deadlock), resets TX state,
 * powers off, and re-initializes all per-device bookkeeping so a later
 * add_interface starts from a clean slate.
 */
1921 static void wlcore_op_stop_locked(struct wl1271 *wl)
1925 if (wl->state == WLCORE_STATE_OFF) {
/* Already off: just balance the interrupt-disable from a recovery */
1926 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1928 wlcore_enable_interrupts(wl);
1934 * this must be before the cancel_work calls below, so that the work
1935 * functions don't perform further work.
1937 wl->state = WLCORE_STATE_OFF;
1940 * Use the nosync variant to disable interrupts, so the mutex could be
1941 * held while doing so without deadlocking.
1943 wlcore_disable_interrupts_nosync(wl);
/* Drop the mutex: the sync cancels below may wait on work that takes it */
1945 mutex_unlock(&wl->mutex);
1947 wlcore_synchronize_interrupts(wl);
1948 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1949 cancel_work_sync(&wl->recovery_work);
1950 wl1271_flush_deferred_work(wl);
1951 cancel_delayed_work_sync(&wl->scan_complete_work);
1952 cancel_work_sync(&wl->netstack_work);
1953 cancel_work_sync(&wl->tx_work);
1954 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1956 /* let's notify MAC80211 about the remaining pending TX frames */
1957 mutex_lock(&wl->mutex);
1958 wl12xx_tx_reset(wl);
1960 wl1271_power_off(wl);
1962 * In case a recovery was scheduled, interrupts were disabled to avoid
1963 * an interrupt storm. Now that the power is down, it is safe to
1964 * re-enable interrupts to balance the disable depth
1966 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1967 wlcore_enable_interrupts(wl);
/* Reset all per-device runtime state to its defaults */
1969 wl->band = NL80211_BAND_2GHZ;
1972 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1973 wl->channel_type = NL80211_CHAN_NO_HT;
1974 wl->tx_blocks_available = 0;
1975 wl->tx_allocated_blocks = 0;
1976 wl->tx_results_count = 0;
1977 wl->tx_packets_count = 0;
1978 wl->time_offset = 0;
1979 wl->ap_fw_ps_map = 0;
1981 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1982 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1983 memset(wl->links_map, 0, sizeof(wl->links_map));
1984 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1985 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1986 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1987 wl->active_sta_count = 0;
1988 wl->active_link_count = 0;
1990 /* The system link is always allocated */
1991 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1992 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1993 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1996 * this is performed after the cancel_work calls and the associated
1997 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1998 * get executed before all these vars have been reset.
2002 wl->tx_blocks_freed = 0;
2004 for (i = 0; i < NUM_TX_QUEUES; i++) {
2005 wl->tx_pkts_freed[i] = 0;
2006 wl->tx_allocated_pkts[i] = 0;
2009 wl1271_debugfs_reset(wl);
/* Free FW status/result buffers; re-allocated on next boot */
2011 kfree(wl->raw_fw_status);
2012 wl->raw_fw_status = NULL;
2013 kfree(wl->fw_status);
2014 wl->fw_status = NULL;
2015 kfree(wl->tx_res_if);
2016 wl->tx_res_if = NULL;
2017 kfree(wl->target_mem_map);
2018 wl->target_mem_map = NULL;
2021 * FW channels must be re-calibrated after recovery,
2022 * save current Reg-Domain channel configuration and clear it.
2024 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2025 sizeof(wl->reg_ch_conf_pending));
2026 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the device mutex and stop the device. */
2029 static void wlcore_op_stop(struct ieee80211_hw *hw)
2031 struct wl1271 *wl = hw->priv;
2033 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2035 mutex_lock(&wl->mutex);
2037 wlcore_op_stop_locked(wl);
2039 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * reports failure to mac80211 and tells the FW to stop the switch.
 */
2042 static void wlcore_channel_switch_work(struct work_struct *work)
2044 struct delayed_work *dwork;
2046 struct ieee80211_vif *vif;
2047 struct wl12xx_vif *wlvif;
2050 dwork = to_delayed_work(work);
2051 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2054 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2056 mutex_lock(&wl->mutex);
2058 if (unlikely(wl->state != WLCORE_STATE_ON))
2061 /* check the channel switch is still ongoing */
2062 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2065 vif = wl12xx_wlvif_to_vif(wlvif);
/* false == the switch did not succeed */
2066 ieee80211_chswitch_done(vif, false);
2068 ret = pm_runtime_get_sync(wl->dev);
2070 pm_runtime_put_noidle(wl->dev);
2074 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2076 pm_runtime_mark_last_busy(wl->dev);
2077 pm_runtime_put_autosuspend(wl->dev);
2079 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a connection loss to mac80211 for a STA vif
 * that is still marked associated when the work fires.
 */
2082 static void wlcore_connection_loss_work(struct work_struct *work)
2084 struct delayed_work *dwork;
2086 struct ieee80211_vif *vif;
2087 struct wl12xx_vif *wlvif;
2089 dwork = to_delayed_work(work);
2090 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2093 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2095 mutex_lock(&wl->mutex);
2097 if (unlikely(wl->state != WLCORE_STATE_ON))
2100 /* Call mac80211 connection loss */
2101 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2104 vif = wl12xx_wlvif_to_vif(wlvif);
2105 ieee80211_connection_loss(vif);
2107 mutex_unlock(&wl->mutex);
/*
 * Delayed work that tears down the pending-auth ROC if no auth reply
 * arrived within WLCORE_PEND_AUTH_ROC_TIMEOUT.
 */
2110 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2112 struct delayed_work *dwork;
2114 struct wl12xx_vif *wlvif;
2115 unsigned long time_spare;
2118 dwork = to_delayed_work(work);
2119 wlvif = container_of(dwork, struct wl12xx_vif,
2120 pending_auth_complete_work);
2123 mutex_lock(&wl->mutex);
2125 if (unlikely(wl->state != WLCORE_STATE_ON))
2129 * Make sure a second really passed since the last auth reply. Maybe
2130 * a second auth reply arrived while we were stuck on the mutex.
2131 * Check for a little less than the timeout to protect from scheduler
2134 time_spare = jiffies +
2135 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2136 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2139 ret = pm_runtime_get_sync(wl->dev);
2141 pm_runtime_put_noidle(wl->dev);
2145 /* cancel the ROC if active */
2146 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2148 pm_runtime_mark_last_busy(wl->dev);
2149 pm_runtime_put_autosuspend(wl->dev);
2151 mutex_unlock(&wl->mutex);
/*
 * Claim a free rate-policy slot from the device bitmap and return its
 * index via *idx. Fails when all WL12XX_MAX_RATE_POLICIES are in use.
 */
2154 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2156 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2157 WL12XX_MAX_RATE_POLICIES);
2158 if (policy >= WL12XX_MAX_RATE_POLICIES)
2161 __set_bit(policy, wl->rate_policies_map);
/* Release a rate-policy slot and mark *idx invalid (== MAX). */
2166 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2168 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2171 __clear_bit(*idx, wl->rate_policies_map);
2172 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim a free keep-alive (KLV) template slot from the device bitmap and
 * return its index via *idx. Fails when all templates are in use.
 */
2175 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2177 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2178 WLCORE_MAX_KLV_TEMPLATES);
2179 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2182 __set_bit(policy, wl->klv_templates_map);
/* Release a keep-alive template slot and mark *idx invalid (== MAX). */
2187 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2189 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2192 __clear_bit(*idx, wl->klv_templates_map);
2193 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type (and P2P/mesh flavor) to a FW role constant.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
2196 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2198 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2200 switch (wlvif->bss_type) {
2201 case BSS_TYPE_AP_BSS:
2203 return WL1271_ROLE_P2P_GO;
2204 else if (ieee80211_vif_is_mesh(vif))
2205 return WL1271_ROLE_MESH_POINT;
2207 return WL1271_ROLE_AP;
2209 case BSS_TYPE_STA_BSS:
2211 return WL1271_ROLE_P2P_CL;
2213 return WL1271_ROLE_STA;
2216 return WL1271_ROLE_IBSS;
2219 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2221 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the driver-private vif data for a newly added interface:
 * derive the BSS type from the mac80211 vif type, allocate rate policies
 * (and a keep-alive template for STA/IBSS), seed per-vif copies of
 * globally configured values, and set up the vif's work items and timer.
 */
2224 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2226 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2229 /* clear everything but the persistent data */
2230 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2232 switch (ieee80211_vif_type_p2p(vif)) {
2233 case NL80211_IFTYPE_P2P_CLIENT:
2236 case NL80211_IFTYPE_STATION:
2237 case NL80211_IFTYPE_P2P_DEVICE:
2238 wlvif->bss_type = BSS_TYPE_STA_BSS;
2240 case NL80211_IFTYPE_ADHOC:
2241 wlvif->bss_type = BSS_TYPE_IBSS;
2243 case NL80211_IFTYPE_P2P_GO:
2246 case NL80211_IFTYPE_AP:
2247 case NL80211_IFTYPE_MESH_POINT:
2248 wlvif->bss_type = BSS_TYPE_AP_BSS;
2251 wlvif->bss_type = MAX_BSS_TYPE;
/* Roles and links start out unassigned */
2255 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2256 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2257 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2259 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2260 wlvif->bss_type == BSS_TYPE_IBSS) {
2261 /* init sta/ibss data */
2262 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2263 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2264 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2265 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2266 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2267 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2268 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2269 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links plus per-AC unicast policies */
2272 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2273 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2274 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2275 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2276 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2277 wl12xx_allocate_rate_policy(wl,
2278 &wlvif->ap.ucast_rate_idx[i]);
2279 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2281 * TODO: check if basic_rate shouldn't be
2282 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2283 * instead (the same thing for STA above).
2285 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2286 /* TODO: this seems to be used only for STA, check it */
2287 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2290 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2291 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2292 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2295 * mac80211 configures some values globally, while we treat them
2296 * per-interface. thus, on init, we have to copy them from wl
2298 wlvif->band = wl->band;
2299 wlvif->channel = wl->channel;
2300 wlvif->power_level = wl->power_level;
2301 wlvif->channel_type = wl->channel_type;
2303 INIT_WORK(&wlvif->rx_streaming_enable_work,
2304 wl1271_rx_streaming_enable_work);
2305 INIT_WORK(&wlvif->rx_streaming_disable_work,
2306 wl1271_rx_streaming_disable_work);
2307 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2308 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2309 wlcore_channel_switch_work);
2310 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2311 wlcore_connection_loss_work);
2312 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2313 wlcore_pending_auth_complete_work);
2314 INIT_LIST_HEAD(&wlvif->list);
2316 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES times.
 * On success, publishes HW/FW version info to wiphy, disables 5 GHz
 * channels if 11a is unsupported, and moves the state to ON.
 */
2320 static int wl12xx_init_fw(struct wl1271 *wl)
2322 int retries = WL1271_BOOT_RETRIES;
2323 bool booted = false;
2324 struct wiphy *wiphy = wl->hw->wiphy;
2329 ret = wl12xx_chip_wakeup(wl, false);
2333 ret = wl->ops->boot(wl);
2337 ret = wl1271_hw_init(wl);
/* Failure path: quiesce IRQs/work before powering off and retrying */
2345 mutex_unlock(&wl->mutex);
2346 /* Unlocking the mutex in the middle of handling is
2347 inherently unsafe. In this case we deem it safe to do,
2348 because we need to let any possibly pending IRQ out of
2349 the system (and while we are WLCORE_STATE_OFF the IRQ
2350 work function will not do anything.) Also, any other
2351 possible concurrent operations will fail due to the
2352 current state, hence the wl1271 struct should be safe. */
2353 wlcore_disable_interrupts(wl);
2354 wl1271_flush_deferred_work(wl);
2355 cancel_work_sync(&wl->netstack_work);
2356 mutex_lock(&wl->mutex);
2358 wl1271_power_off(wl);
2362 wl1271_error("firmware boot failed despite %d retries",
2363 WL1271_BOOT_RETRIES);
2367 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2369 /* update hw/fw version info in wiphy struct */
2370 wiphy->hw_version = wl->chip.id;
/*
 * NOTE(review): strncpy() does not guarantee NUL-termination when the
 * source fills the buffer; presumably fw_ver_str is always shorter than
 * wiphy->fw_version — confirm, or consider a termination-guaranteeing
 * copy helper.
 */
2371 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2372 sizeof(wiphy->fw_version));
2375 * Now we know if 11a is supported (info from the NVS), so disable
2376 * 11a channels if not supported
2378 if (!wl->enable_11a)
2379 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2381 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2382 wl->enable_11a ? "" : "not ");
2384 wl->state = WLCORE_STATE_ON;
/* True if the device role has an allocated (valid) link id. */
2389 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2391 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2395 * Check whether a fw switch (i.e. moving from one loaded
2396 * fw to another) is needed. This function is also responsible
2397 * for updating wl->last_vif_count, so it must be called before
2398 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2401 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2402 struct vif_counter_data vif_counter_data,
2405 enum wl12xx_fw_type current_fw = wl->fw_type;
2406 u8 vif_count = vif_counter_data.counter;
/* Never switch FW while a vif change is already in flight */
2408 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2411 /* increase the vif count if this is a new vif */
2412 if (add && !vif_counter_data.cur_vif_running)
2415 wl->last_vif_count = vif_count;
2417 /* no need for fw change if the device is OFF */
2418 if (wl->state == WLCORE_STATE_OFF)
2421 /* no need for fw change if a single fw is used */
2422 if (!wl->mr_fw_name)
/* Switch needed when vif count and loaded FW flavor disagree */
2425 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2427 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2434 * Enter "forced psm". Make sure the sta is in psm against the ap,
2435 * to make the fw switch a bit more disconnection-persistent.
2437 static void wl12xx_force_active_psm(struct wl1271 *wl)
2439 struct wl12xx_vif *wlvif;
/* Apply to every STA vif; AP vifs are unaffected */
2441 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2442 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for wlcore_hw_queue_iter(): which HW queue bases are taken. */
2446 struct wlcore_hw_queue_iter_data {
2447 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
/* vif whose queue base is being (re)allocated */
2449 struct ieee80211_vif *vif;
2450 /* is the current vif among those iterated */
/*
 * Interface iterator callback: record each active vif's HW queue base in
 * the bitmap, and flag when the vif being allocated is already running.
 */
2454 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2455 struct ieee80211_vif *vif)
2457 struct wlcore_hw_queue_iter_data *iter_data = data;
/* P2P device vifs have no HW queues; skip invalid entries too */
2459 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2460 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2463 if (iter_data->cur_running || vif == iter_data->vif) {
2464 iter_data->cur_running = true;
2468 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 HW queues to the vif. Reuses
 * the vif's existing base if mac80211 already runs it (resume/recovery),
 * otherwise picks the first free base. AP vifs additionally get a CAB
 * (content-after-beacon) queue from the reserved tail range.
 */
2471 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2472 struct wl12xx_vif *wlvif)
2474 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2475 struct wlcore_hw_queue_iter_data iter_data = {};
/* P2P device vifs carry no data traffic; no queues needed */
2478 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2479 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2483 iter_data.vif = vif;
2485 /* mark all bits taken by active interfaces */
2486 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2487 IEEE80211_IFACE_ITER_RESUME_ALL,
2488 wlcore_hw_queue_iter, &iter_data);
2490 /* the current vif is already running in mac80211 (resume/recovery) */
2491 if (iter_data.cur_running) {
2492 wlvif->hw_queue_base = vif->hw_queue[0];
2493 wl1271_debug(DEBUG_MAC80211,
2494 "using pre-allocated hw queue base %d",
2495 wlvif->hw_queue_base);
2497 /* interface type might have changed type */
2498 goto adjust_cab_queue;
2501 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2502 WLCORE_NUM_MAC_ADDRESSES);
2503 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2506 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2507 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2508 wlvif->hw_queue_base);
2510 for (i = 0; i < NUM_TX_QUEUES; i++) {
2511 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2512 /* register hw queues in mac80211 */
2513 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2517 /* the last places are reserved for cab queues per interface */
2518 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2519 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2520 wlvif->hw_queue_base / NUM_TX_QUEUES;
2522 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback. Initializes per-vif data, allocates a
 * HW queue base, boots the FW on first interface (deferred from start()
 * until the MAC address is known), switches FW flavor via a forced
 * recovery when the vif count requires it, and enables the FW role(s).
 */
2527 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2528 struct ieee80211_vif *vif)
2530 struct wl1271 *wl = hw->priv;
2531 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2532 struct vif_counter_data vif_count;
2537 wl1271_error("Adding Interface not allowed while in PLT mode");
2541 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2542 IEEE80211_VIF_SUPPORTS_UAPSD |
2543 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2545 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2546 ieee80211_vif_type_p2p(vif), vif->addr);
2548 wl12xx_get_vif_count(hw, vif, &vif_count);
2550 mutex_lock(&wl->mutex);
2553 * in some very corner case HW recovery scenarios its possible to
2554 * get here before __wl1271_op_remove_interface is complete, so
2555 * opt out if that is the case.
2557 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2558 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2564 ret = wl12xx_init_vif_data(wl, vif);
2569 role_type = wl12xx_get_role_type(wl, wlvif);
2570 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2575 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2580 * TODO: after the nvs issue will be solved, move this block
2581 * to start(), and make sure here the driver is ON.
2583 if (wl->state == WLCORE_STATE_OFF) {
2585 * we still need this in order to configure the fw
2586 * while uploading the nvs
2588 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2590 ret = wl12xx_init_fw(wl);
2596 * Call runtime PM only after possible wl12xx_init_fw() above
2597 * is done. Otherwise we do not have interrupts enabled.
2599 ret = pm_runtime_get_sync(wl->dev);
2601 pm_runtime_put_noidle(wl->dev);
/* Switching between single-role and multi-role FW needs a reboot */
2605 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2606 wl12xx_force_active_psm(wl);
2607 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2608 mutex_unlock(&wl->mutex);
2609 wl1271_recovery_work(&wl->recovery_work);
2613 if (!wlcore_is_p2p_mgmt(wlvif)) {
2614 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2615 role_type, &wlvif->role_id);
2619 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt vifs only get a device role */
2624 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2625 &wlvif->dev_role_id);
2629 /* needed mainly for configuring rate policies */
2630 ret = wl1271_sta_hw_init(wl, wlvif);
2635 list_add(&wlvif->list, &wl->wlvif_list);
2636 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2638 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2643 pm_runtime_mark_last_busy(wl->dev);
2644 pm_runtime_put_autosuspend(wl->dev);
2646 mutex_unlock(&wl->mutex);
2651 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2652 struct ieee80211_vif *vif,
2653 bool reset_tx_queues)
2655 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2657 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2659 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2661 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2664 /* because of hardware recovery, we may get here twice */
2665 if (wl->state == WLCORE_STATE_OFF)
2668 wl1271_info("down");
2670 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2671 wl->scan_wlvif == wlvif) {
2672 struct cfg80211_scan_info info = {
2677 * Rearm the tx watchdog just before idling scan. This
2678 * prevents just-finished scans from triggering the watchdog
2680 wl12xx_rearm_tx_watchdog_locked(wl);
2682 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2683 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2684 wl->scan_wlvif = NULL;
2685 wl->scan.req = NULL;
2686 ieee80211_scan_completed(wl->hw, &info);
2689 if (wl->sched_vif == wlvif)
2690 wl->sched_vif = NULL;
2692 if (wl->roc_vif == vif) {
2694 ieee80211_remain_on_channel_expired(wl->hw);
2697 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2698 /* disable active roles */
2699 ret = pm_runtime_get_sync(wl->dev);
2701 pm_runtime_put_noidle(wl->dev);
2705 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2706 wlvif->bss_type == BSS_TYPE_IBSS) {
2707 if (wl12xx_dev_role_started(wlvif))
2708 wl12xx_stop_dev(wl, wlvif);
2711 if (!wlcore_is_p2p_mgmt(wlvif)) {
2712 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2716 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2721 pm_runtime_mark_last_busy(wl->dev);
2722 pm_runtime_put_autosuspend(wl->dev);
2725 wl12xx_tx_reset_wlvif(wl, wlvif);
2727 /* clear all hlids (except system_hlid) */
2728 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2730 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2731 wlvif->bss_type == BSS_TYPE_IBSS) {
2732 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2733 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2734 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2735 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2736 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2738 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2739 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2740 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2741 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2742 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2743 wl12xx_free_rate_policy(wl,
2744 &wlvif->ap.ucast_rate_idx[i]);
2745 wl1271_free_ap_keys(wl, wlvif);
2748 dev_kfree_skb(wlvif->probereq);
2749 wlvif->probereq = NULL;
2750 if (wl->last_wlvif == wlvif)
2751 wl->last_wlvif = NULL;
2752 list_del(&wlvif->list);
2753 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2754 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2755 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2763 * Last AP, have more stations. Configure sleep auth according to STA.
2764 * Don't do this on unintended recovery.
2766 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2767 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2770 if (wl->ap_count == 0 && is_ap) {
2771 /* mask ap events */
2772 wl->event_mask &= ~wl->ap_event_mask;
2773 wl1271_event_unmask(wl);
2776 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2777 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2778 /* Configure for power according to debugfs */
2779 if (sta_auth != WL1271_PSM_ILLEGAL)
2780 wl1271_acx_sleep_auth(wl, sta_auth);
2781 /* Configure for ELP power saving */
2783 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2787 mutex_unlock(&wl->mutex);
2789 del_timer_sync(&wlvif->rx_streaming_timer);
2790 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2791 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2792 cancel_work_sync(&wlvif->rc_update_work);
2793 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2794 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2795 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2797 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface op: tear down @vif under wl->mutex via
 * __wl1271_op_remove_interface(), then, if the remaining vif count
 * requires a different firmware, trigger an intended recovery.
 */
2800 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2801 struct ieee80211_vif *vif)
2803 struct wl1271 *wl = hw->priv;
2804 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2805 struct wl12xx_vif *iter;
2806 struct vif_counter_data vif_count;
/* snapshot the vif count before taking the mutex */
2808 wl12xx_get_vif_count(hw, vif, &vif_count);
2809 mutex_lock(&wl->mutex);
/* nothing to do if hw is off or the vif was never initialized */
2811 if (wl->state == WLCORE_STATE_OFF ||
2812 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2816 * wl->vif can be null here if someone shuts down the interface
2817 * just when hardware recovery has been started.
2819 wl12xx_for_each_wlvif(wl, iter) {
2823 __wl1271_op_remove_interface(wl, vif, true);
/* the loop above should have stopped exactly on our vif */
2826 WARN_ON(iter != wlvif);
2827 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2828 wl12xx_force_active_psm(wl);
/* mark the recovery as intended so teardown paths don't treat it as a crash */
2829 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2830 wl12xx_queue_recovery_work(wl);
2833 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface op: implemented as remove + re-add of the
 * same vif with the new type. The VIF_CHANGE_IN_PROGRESS flag brackets
 * the operation so other paths can tell this is not a real teardown.
 */
2836 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2837 struct ieee80211_vif *vif,
2838 enum nl80211_iftype new_type, bool p2p)
2840 struct wl1271 *wl = hw->priv;
2843 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2844 wl1271_op_remove_interface(hw, vif);
/* retype the vif, then bring it back up as the new interface type */
2846 vif->type = new_type;
2848 ret = wl1271_op_add_interface(hw, vif);
2850 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN for @wlvif: role_start_ibss for IBSS,
 * role_start_sta otherwise (with a dummy start+stop first on hardware
 * affected by the START_STA_FAILS quirk).
 */
2854 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2857 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2860 * One of the side effects of the JOIN command is that it clears
2861 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2862 * to a WPA/WPA2 access point will therefore kill the data-path.
2863 * Currently the only valid scenario for JOIN during association
2864 * is on roaming, in which case we will also be given new keys.
2865 * Keep the below message for now, unless it starts bothering
2866 * users who really like to roam a lot :)
2868 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2869 wl1271_info("JOIN while associated.");
2871 /* clear encryption type */
2872 wlvif->encryption_type = KEY_NONE;
2875 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2877 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2879 * TODO: this is an ugly workaround for wl12xx fw
2880 * bug - we are not able to tx/rx after the first
2881 * start_sta, so make dummy start+stop calls,
2882 * and then call start_sta again.
2883 * this should be fixed in the fw.
2885 wl12xx_cmd_role_start_sta(wl, wlvif);
2886 wl12xx_cmd_role_stop_sta(wl, wlvif);
2889 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from @skb (IEs start at @offset) and cache it in
 * wlvif->ssid / wlvif->ssid_len. Errors out if the IE is absent or its
 * length exceeds IEEE80211_MAX_SSID_LEN.
 */
2895 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2899 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2903 wl1271_error("No SSID in IEs!");
2908 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2909 wl1271_error("SSID is too long!");
2913 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (element ID + length byte) */
2914 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate wlvif->ssid from the AP probe request mac80211 built for this
 * vif. Only meaningful for station vifs.
 */
2918 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2920 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2921 struct sk_buff *skb;
2924 /* we currently only support setting the ssid from the ap probe req */
2925 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2928 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs start after the fixed probe-request header */
2932 ieoffset = offsetof(struct ieee80211_mgmt,
2933 u.probe_req.variable);
2934 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply post-association state for a station vif: cache association
 * parameters from @bss_conf, rebuild the ps-poll / probe-req / klv
 * templates, and (re)enable connection monitoring and keep-alive.
 * Command ordering below matters (see inline comments).
 */
2940 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2941 struct ieee80211_bss_conf *bss_conf,
/* cache the association parameters mac80211 handed us */
2947 wlvif->aid = bss_conf->aid;
2948 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2949 wlvif->beacon_int = bss_conf->beacon_int;
2950 wlvif->wmm_enabled = bss_conf->qos;
2952 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2955 * with wl1271, we don't need to update the
2956 * beacon_int and dtim_period, because the firmware
2957 * updates it by itself when the first beacon is
2958 * received after a join.
2960 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2965 * Get a template for hardware connection maintenance
2967 dev_kfree_skb(wlvif->probereq);
2968 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2971 ieoffset = offsetof(struct ieee80211_mgmt,
2972 u.probe_req.variable);
2973 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2975 /* enable the connection monitoring feature */
2976 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2981 * The join command disable the keep-alive mode, shut down its process,
2982 * and also clear the template config, so we need to reset it all after
2983 * the join. The acx_aid starts the keep-alive process, and the order
2984 * of the commands below is relevant.
2986 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2990 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2994 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2998 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2999 wlvif->sta.klv_template_id,
3000 ACX_KEEP_ALIVE_TPL_VALID);
3005 * The default fw psm configuration is AUTO, while mac80211 default
3006 * setting is off (ACTIVE), so sync the fw with the correct value.
3008 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3014 wl1271_tx_enabled_rates_get(wl,
3017 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): drop the cached probe-req template, disable
 * connection monitoring, keep-alive and beacon filtering, abort any
 * channel switch in progress, and invalidate the keep-alive template.
 */
3025 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3028 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3030 /* make sure we are connected (sta) joined */
3032 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3035 /* make sure we are joined (ibss) */
3037 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3041 /* use defaults when not associated */
3044 /* free probe-request template */
3045 dev_kfree_skb(wlvif->probereq);
3046 wlvif->probereq = NULL;
3048 /* disable connection monitor features */
3049 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3053 /* Disable the keep-alive feature */
3054 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3058 /* disable beacon filtering */
3059 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* if a channel switch was pending, abort it and tell mac80211 it failed */
3064 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3065 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3067 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3068 ieee80211_chswitch_done(vif, false);
3069 cancel_delayed_work(&wlvif->channel_switch_work);
3072 /* invalidate keep-alive template */
3073 wl1271_acx_keep_alive_config(wl, wlvif,
3074 wlvif->sta.klv_template_id,
3075 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the vif's rate sets to the per-band default bitrate mask. */
3080 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3082 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3083 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the station vif's idle state via WLVIF_FLAG_ACTIVE. Entering
 * idle stops any sched_scan owned by this vif (fw limitation noted
 * below); no-op when the state is unchanged.
 */
3086 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3089 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3091 if (idle == cur_idle)
3095 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3097 /* The current firmware only supports sched_scan in idle */
3098 if (wl->sched_vif == wlvif)
3099 wl->ops->sched_scan_stop(wl, wlvif);
3101 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif half of op_config: push a changed tx power level to firmware.
 * p2p management vifs are skipped entirely.
 */
3105 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3106 struct ieee80211_conf *conf, u32 changed)
3110 if (wlcore_is_p2p_mgmt(wlvif))
/* only update fw when the power level actually changed */
3113 if (conf->power_level != wlvif->power_level) {
3114 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3118 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config op: record the global power level and apply the
 * changed config to every vif, holding a runtime-PM reference while
 * talking to the firmware.
 */
3124 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3126 struct wl1271 *wl = hw->priv;
3127 struct wl12xx_vif *wlvif;
3128 struct ieee80211_conf *conf = &hw->conf;
3131 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3133 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3135 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3138 mutex_lock(&wl->mutex);
3140 if (changed & IEEE80211_CONF_CHANGE_POWER)
3141 wl->power_level = conf->power_level;
3143 if (unlikely(wl->state != WLCORE_STATE_ON))
/* wake the device for the duration of the fw commands */
3146 ret = pm_runtime_get_sync(wl->dev);
3148 pm_runtime_put_noidle(wl->dev);
3152 /* configure each interface */
3153 wl12xx_for_each_wlvif(wl, wlvif) {
3154 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3160 pm_runtime_mark_last_busy(wl->dev);
3161 pm_runtime_put_autosuspend(wl->dev);
3164 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast() and consumed by
 * configure_filter() via the opaque u64 multicast cookie.
 */
3169 struct wl1271_filter_params {
3172 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast op: snapshot the hw multicast list into a
 * freshly allocated wl1271_filter_params and return it as the u64
 * multicast cookie for configure_filter(). Runs in atomic context,
 * hence GFP_ATOMIC. If the list exceeds ACX_MC_ADDRESS_GROUP_MAX the
 * per-address filter is disabled rather than truncated.
 */
3175 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176 struct netdev_hw_addr_list *mc_list)
3178 struct wl1271_filter_params *fp;
3179 struct netdev_hw_addr *ha;
3181 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3183 wl1271_error("Out of memory setting filters.");
3187 /* update multicast filtering parameters */
3188 fp->mc_list_length = 0;
3189 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190 fp->enabled = false;
3193 netdev_hw_addr_list_for_each(ha, mc_list) {
3194 memcpy(fp->mc_list[fp->mc_list_length],
3195 ha->addr, ETH_ALEN);
3196 fp->mc_list_length++;
/* ownership of fp transfers to configure_filter() through this cookie */
3200 return (u64)(unsigned long)fp;
/* rx filter flags the driver can honour in configure_filter() */
3203 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3205 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter op: program the firmware multicast group
 * address table per vif from the cookie built by prepare_multicast().
 * Non-AP vifs get the address list (or pass-all under FIF_ALLMULTI);
 * AP vifs disable the fw filter entirely under FIF_ALLMULTI.
 */
3209 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3210 unsigned int changed,
3211 unsigned int *total, u64 multicast)
3213 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3214 struct wl1271 *wl = hw->priv;
3215 struct wl12xx_vif *wlvif;
3219 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3220 " total %x", changed, *total);
3222 mutex_lock(&wl->mutex);
/* clamp requested filters to what the hardware supports */
3224 *total &= WL1271_SUPPORTED_FILTERS;
3225 changed &= WL1271_SUPPORTED_FILTERS;
3227 if (unlikely(wl->state != WLCORE_STATE_ON))
3230 ret = pm_runtime_get_sync(wl->dev);
3232 pm_runtime_put_noidle(wl->dev);
3236 wl12xx_for_each_wlvif(wl, wlvif) {
3237 if (wlcore_is_p2p_mgmt(wlvif))
3240 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3241 if (*total & FIF_ALLMULTI)
3242 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3246 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3249 fp->mc_list_length);
3255 * If interface in AP mode and created with allmulticast then disable
3256 * the firmware filters so that all multicast packets are passed
3257 * This is mandatory for MDNS based discovery protocols
3259 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3260 if (*total & FIF_ALLMULTI) {
3261 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3271 * the fw doesn't provide an api to configure the filters. instead,
3272 * the filters configuration is based on the active roles / ROC
3277 pm_runtime_mark_last_busy(wl->dev);
3278 pm_runtime_put_autosuspend(wl->dev);
3281 mutex_unlock(&wl->mutex);
/*
 * Stash an AP group/pairwise key in wlvif->ap.recorded_keys so it can be
 * pushed to the firmware later by wl1271_ap_init_hwenc() (keys may be
 * set before the AP role is started). Rejects oversized keys, duplicate
 * ids, and a full table.
 */
3285 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3286 u8 id, u8 key_type, u8 key_size,
3287 const u8 *key, u8 hlid, u32 tx_seq_32,
3290 struct wl1271_ap_key *ap_key;
3293 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3295 if (key_size > MAX_KEY_SIZE)
3299 * Find next free entry in ap_keys. Also check we are not replacing
3302 for (i = 0; i < MAX_NUM_KEYS; i++) {
3303 if (wlvif->ap.recorded_keys[i] == NULL)
3306 if (wlvif->ap.recorded_keys[i]->id == id) {
3307 wl1271_warning("trying to record key replacement");
3312 if (i == MAX_NUM_KEYS)
3315 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
/* copy the key material and sequence counters into the record */
3320 ap_key->key_type = key_type;
3321 ap_key->key_size = key_size;
3322 memcpy(ap_key->key, key, key_size);
3323 ap_key->hlid = hlid;
3324 ap_key->tx_seq_32 = tx_seq_32;
3325 ap_key->tx_seq_16 = tx_seq_16;
3327 wlvif->ap.recorded_keys[i] = ap_key;
/* Free every recorded AP key and NULL the table entries. */
3331 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3335 for (i = 0; i < MAX_NUM_KEYS; i++) {
3336 kfree(wlvif->ap.recorded_keys[i]);
3337 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Push every key recorded by wl1271_record_ap_key() into the firmware
 * once the AP role is up. Keys without a valid hlid go to the broadcast
 * link. If any WEP key was installed, also set the default WEP key.
 * The recorded-key table is freed at the end regardless.
 */
3341 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3344 struct wl1271_ap_key *key;
3345 bool wep_key_added = false;
3347 for (i = 0; i < MAX_NUM_KEYS; i++) {
3349 if (wlvif->ap.recorded_keys[i] == NULL)
3352 key = wlvif->ap.recorded_keys[i];
/* group keys were recorded with an invalid hlid; map to broadcast link */
3354 if (hlid == WL12XX_INVALID_LINK_ID)
3355 hlid = wlvif->ap.bcast_hlid;
3357 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3358 key->id, key->key_type,
3359 key->key_size, key->key,
3360 hlid, key->tx_seq_32,
3365 if (key->key_type == KEY_WEP)
3366 wep_key_added = true;
3369 if (wep_key_added) {
3370 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3371 wlvif->ap.bcast_hlid);
3377 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key add/remove dispatcher. AP vifs: record the key if the
 * AP is not started yet, otherwise program it directly (group keys use
 * the broadcast hlid). STA vifs: program per-station keys, silently
 * ignoring removals the hardware cannot honour (see inline comments).
 */
3381 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3382 u16 action, u8 id, u8 key_type,
3383 u8 key_size, const u8 *key, u32 tx_seq_32,
3384 u16 tx_seq_16, struct ieee80211_sta *sta)
3387 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3390 struct wl1271_station *wl_sta;
3394 wl_sta = (struct wl1271_station *)sta->drv_priv;
3395 hlid = wl_sta->hlid;
3397 hlid = wlvif->ap.bcast_hlid;
3400 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3402 * We do not support removing keys after AP shutdown.
3403 * Pretend we do to make mac80211 happy.
3405 if (action != KEY_ADD_OR_REPLACE)
3408 ret = wl1271_record_ap_key(wl, wlvif, id,
3410 key, hlid, tx_seq_32,
3413 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3414 id, key_type, key_size,
3415 key, hlid, tx_seq_32,
3423 static const u8 bcast_addr[ETH_ALEN] = {
3424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
/* group keys carry no sta; address them to broadcast */
3427 addr = sta ? sta->addr : bcast_addr;
3429 if (is_zero_ether_addr(addr)) {
3430 /* We dont support TX only encryption */
3434 /* The wl1271 does not allow to remove unicast keys - they
3435 will be cleared automatically on next CMD_JOIN. Ignore the
3436 request silently, as we dont want the mac80211 to emit
3437 an error message. */
3438 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3441 /* don't remove key if hlid was already deleted */
3442 if (action == KEY_REMOVE &&
3443 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3446 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3447 id, key_type, key_size,
3448 key, addr, tx_seq_32,
/*
 * mac80211 .set_key op: wrap the chip-specific set_key in runtime-PM and
 * wl->mutex. GEM/TKIP ciphers may change the fw spare-block accounting,
 * so the tx queues are stopped and flushed around the operation.
 */
3458 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3459 struct ieee80211_vif *vif,
3460 struct ieee80211_sta *sta,
3461 struct ieee80211_key_conf *key_conf)
3463 struct wl1271 *wl = hw->priv;
3465 bool might_change_spare =
3466 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3467 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3469 if (might_change_spare) {
3471 * stop the queues and flush to ensure the next packets are
3472 * in sync with FW spare block accounting
3474 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3475 wl1271_tx_flush(wl);
3478 mutex_lock(&wl->mutex);
3480 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3482 goto out_wake_queues;
3485 ret = pm_runtime_get_sync(wl->dev);
3487 pm_runtime_put_noidle(wl->dev);
3488 goto out_wake_queues;
/* delegate to the chip-family implementation (wl12xx/wl18xx) */
3491 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3493 pm_runtime_mark_last_busy(wl->dev);
3494 pm_runtime_put_autosuspend(wl->dev);
3497 if (might_change_spare)
3498 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3500 mutex_unlock(&wl->mutex);
/*
 * Common set_key implementation shared by the chip families: resolve
 * the target hlid and its tx sequence counters, map the cipher suite to
 * a firmware key type, then add/replace or remove the key. On a STA
 * vif, changing the unicast/common key type also rebuilds the ARP
 * response template.
 */
3505 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3506 struct ieee80211_vif *vif,
3507 struct ieee80211_sta *sta,
3508 struct ieee80211_key_conf *key_conf)
3510 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3517 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3519 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3520 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3521 key_conf->cipher, key_conf->keyidx,
3522 key_conf->keylen, key_conf->flags);
3523 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
/* pick the link id: per-sta or broadcast for AP, sta.hlid for STA */
3525 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3527 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3528 hlid = wl_sta->hlid;
3530 hlid = wlvif->ap.bcast_hlid;
3533 hlid = wlvif->sta.hlid;
/* seed the key's tx sequence counters from the link's freed-pkts count */
3535 if (hlid != WL12XX_INVALID_LINK_ID) {
3536 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3537 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3538 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
/* translate the mac80211 cipher suite into a firmware key type */
3541 switch (key_conf->cipher) {
3542 case WLAN_CIPHER_SUITE_WEP40:
3543 case WLAN_CIPHER_SUITE_WEP104:
3546 key_conf->hw_key_idx = key_conf->keyidx;
3548 case WLAN_CIPHER_SUITE_TKIP:
3549 key_type = KEY_TKIP;
3550 key_conf->hw_key_idx = key_conf->keyidx;
3552 case WLAN_CIPHER_SUITE_CCMP:
3554 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3556 case WL1271_CIPHER_SUITE_GEM:
3560 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3567 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3568 key_conf->keyidx, key_type,
3569 key_conf->keylen, key_conf->key,
3570 tx_seq_32, tx_seq_16, sta);
3572 wl1271_error("Could not add or replace key");
3577 * reconfiguring arp response if the unicast (or common)
3578 * encryption key type was changed
3580 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3581 (sta || key_type == KEY_WEP) &&
3582 wlvif->encryption_type != key_type) {
3583 wlvif->encryption_type = key_type;
3584 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3586 wl1271_warning("build arp rsp failed: %d", ret);
3593 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3594 key_conf->keyidx, key_type,
3595 key_conf->keylen, key_conf->key,
3598 wl1271_error("Could not remove key");
3604 wl1271_error("Unsupported key cmd 0x%x", cmd);
3610 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key op: remember the default key index
 * and, when WEP is in use, program it into the firmware.
 */
3612 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3613 struct ieee80211_vif *vif,
3616 struct wl1271 *wl = hw->priv;
3617 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3620 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3623 /* we don't handle unsetting of default key */
3627 mutex_lock(&wl->mutex);
3629 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3634 ret = pm_runtime_get_sync(wl->dev);
3636 pm_runtime_put_noidle(wl->dev);
3640 wlvif->default_key = key_idx;
3642 /* the default WEP key needs to be configured at least once */
3643 if (wlvif->encryption_type == KEY_WEP) {
3644 ret = wl12xx_cmd_set_default_wep_key(wl,
3652 pm_runtime_mark_last_busy(wl->dev);
3653 pm_runtime_put_autosuspend(wl->dev);
3656 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to the firmware on chips with the
 * REGDOMAIN_CONF quirk; a failed fw command queues a recovery.
 */
3659 void wlcore_regdomain_config(struct wl1271 *wl)
3663 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3666 mutex_lock(&wl->mutex);
3668 if (unlikely(wl->state != WLCORE_STATE_ON))
3671 ret = pm_runtime_get_sync(wl->dev);
3675 ret = wlcore_cmd_regdomain_config_locked(wl);
3677 wl12xx_queue_recovery_work(wl);
3681 pm_runtime_mark_last_busy(wl->dev);
3682 pm_runtime_put_autosuspend(wl->dev);
3684 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan op: start a hardware scan via wlcore_scan().
 * Refused while any role is in ROC (remain-on-channel).
 */
3687 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3688 struct ieee80211_vif *vif,
3689 struct ieee80211_scan_request *hw_req)
3691 struct cfg80211_scan_request *req = &hw_req->req;
3692 struct wl1271 *wl = hw->priv;
3697 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first requested SSID is passed to the fw scan */
3700 ssid = req->ssids[0].ssid;
3701 len = req->ssids[0].ssid_len;
3704 mutex_lock(&wl->mutex);
3706 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3708 * We cannot return -EBUSY here because cfg80211 will expect
3709 * a call to ieee80211_scan_completed if we do - in this case
3710 * there won't be any call.
3716 ret = pm_runtime_get_sync(wl->dev);
3718 pm_runtime_put_noidle(wl->dev);
3722 /* fail if there is any role in ROC */
3723 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3724 /* don't allow scanning right now */
3729 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3731 pm_runtime_mark_last_busy(wl->dev);
3732 pm_runtime_put_autosuspend(wl->dev);
3734 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan op: stop an in-flight scan in the firmware,
 * reset the driver's scan bookkeeping, and report completion to
 * mac80211. The scan_complete work is cancelled outside wl->mutex.
 */
3739 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3740 struct ieee80211_vif *vif)
3742 struct wl1271 *wl = hw->priv;
3743 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3744 struct cfg80211_scan_info info = {
3749 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3751 mutex_lock(&wl->mutex);
3753 if (unlikely(wl->state != WLCORE_STATE_ON))
3756 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3759 ret = pm_runtime_get_sync(wl->dev);
3761 pm_runtime_put_noidle(wl->dev);
/* ask the fw to abort only if the scan isn't already done */
3765 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3766 ret = wl->ops->scan_stop(wl, wlvif);
3772 * Rearm the tx watchdog just before idling scan. This
3773 * prevents just-finished scans from triggering the watchdog
3775 wl12xx_rearm_tx_watchdog_locked(wl);
3777 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3778 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3779 wl->scan_wlvif = NULL;
3780 wl->scan.req = NULL;
3781 ieee80211_scan_completed(wl->hw, &info);
3784 pm_runtime_mark_last_busy(wl->dev);
3785 pm_runtime_put_autosuspend(wl->dev);
3787 mutex_unlock(&wl->mutex);
3789 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start op: start a scheduled (periodic) scan via
 * the chip-family op and record the owning vif in wl->sched_vif.
 */
3792 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3793 struct ieee80211_vif *vif,
3794 struct cfg80211_sched_scan_request *req,
3795 struct ieee80211_scan_ies *ies)
3797 struct wl1271 *wl = hw->priv;
3798 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3801 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3803 mutex_lock(&wl->mutex);
3805 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3810 ret = pm_runtime_get_sync(wl->dev);
3812 pm_runtime_put_noidle(wl->dev);
3816 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* remember which vif owns the running sched scan */
3820 wl->sched_vif = wlvif;
3823 pm_runtime_mark_last_busy(wl->dev);
3824 pm_runtime_put_autosuspend(wl->dev);
3826 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop op: stop the scheduled scan through the
 * chip-family op, under wl->mutex with a runtime-PM reference held.
 */
3830 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3831 struct ieee80211_vif *vif)
3833 struct wl1271 *wl = hw->priv;
3834 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3837 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3839 mutex_lock(&wl->mutex);
3841 if (unlikely(wl->state != WLCORE_STATE_ON))
3844 ret = pm_runtime_get_sync(wl->dev);
3846 pm_runtime_put_noidle(wl->dev);
3850 wl->ops->sched_scan_stop(wl, wlvif);
3852 pm_runtime_mark_last_busy(wl->dev);
3853 pm_runtime_put_autosuspend(wl->dev);
3855 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold op: forward the fragmentation threshold
 * to the firmware via the ACX command; failure is only logged.
 */
3860 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3862 struct wl1271 *wl = hw->priv;
3865 mutex_lock(&wl->mutex);
3867 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3872 ret = pm_runtime_get_sync(wl->dev);
3874 pm_runtime_put_noidle(wl->dev);
3878 ret = wl1271_acx_frag_threshold(wl, value);
3880 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3882 pm_runtime_mark_last_busy(wl->dev);
3883 pm_runtime_put_autosuspend(wl->dev);
3886 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold op: apply the RTS threshold to every vif
 * (the ACX command is per-role); failure is only logged.
 */
3891 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3893 struct wl1271 *wl = hw->priv;
3894 struct wl12xx_vif *wlvif;
3897 mutex_lock(&wl->mutex);
3899 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3904 ret = pm_runtime_get_sync(wl->dev);
3906 pm_runtime_put_noidle(wl->dev);
3910 wl12xx_for_each_wlvif(wl, wlvif) {
3911 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3913 wl1271_warning("set rts threshold failed: %d", ret);
3915 pm_runtime_mark_last_busy(wl->dev);
3916 pm_runtime_put_autosuspend(wl->dev);
3919 mutex_unlock(&wl->mutex);
/*
 * Delete the first IE with element id @eid from @skb (IEs start at
 * @ieoffset): close the gap with memmove and shrink the skb.
 */
3924 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3927 const u8 *next, *end = skb->data + skb->len;
3928 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3929 skb->len - ieoffset);
3934 memmove(ie, next, end - next);
3935 skb_trim(skb, skb->len - len);
/*
 * Delete the first vendor-specific IE matching @oui/@oui_type from
 * @skb, same mechanics as wl12xx_remove_ie().
 */
3938 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3939 unsigned int oui, u8 oui_type,
3943 const u8 *next, *end = skb->data + skb->len;
3944 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3945 skb->data + ieoffset,
3946 skb->len - ieoffset);
3951 memmove(ie, next, end - next);
3952 skb_trim(skb, skb->len - len);
/*
 * Install the AP probe response template from the skb usermode gave
 * mac80211 (ieee80211_proberesp_get), and mark it as explicitly set so
 * beacon-derived templates won't overwrite it.
 */
3955 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3956 struct ieee80211_vif *vif)
3958 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3959 struct sk_buff *skb;
3962 skb = ieee80211_proberesp_get(wl->hw, vif);
3966 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3967 CMD_TEMPL_AP_PROBE_RESPONSE,
3976 wl1271_debug(DEBUG_AP, "probe response updated");
3977 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template setter: if the vif has no cached SSID,
 * rebuild the template by splicing the SSID from bss_conf into the
 * beacon-derived probe response data (replace the existing SSID IE
 * in place), then upload it.
 */
3983 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3984 struct ieee80211_vif *vif,
3986 size_t probe_rsp_len,
3989 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3990 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3991 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3992 int ssid_ie_offset, ie_offset, templ_len;
3995 /* no need to change probe response if the SSID is set correctly */
3996 if (wlvif->ssid_len > 0)
3997 return wl1271_cmd_template_set(wl, wlvif->role_id,
3998 CMD_TEMPL_AP_PROBE_RESPONSE,
/* the rebuilt template must still fit in the fw template buffer */
4003 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4004 wl1271_error("probe_rsp template too big");
4008 /* start searching from IE offset */
4009 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4011 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4012 probe_rsp_len - ie_offset);
4014 wl1271_error("No SSID in beacon!");
4018 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the old SSID IE (header + payload) */
4019 ptr += (ptr[1] + 2);
4021 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4023 /* insert SSID from bss_conf */
4024 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4025 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4026 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4027 bss_conf->ssid, bss_conf->ssid_len);
4028 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append everything that followed the original SSID IE */
4030 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4031 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4032 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4034 return wl1271_cmd_template_set(wl, wlvif->role_id,
4035 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to the firmware: slot time,
 * preamble length, and CTS protection. Failures on slot/ctsprotect are
 * logged as warnings.
 */
4041 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4042 struct ieee80211_vif *vif,
4043 struct ieee80211_bss_conf *bss_conf,
4046 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4049 if (changed & BSS_CHANGED_ERP_SLOT) {
4050 if (bss_conf->use_short_slot)
4051 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4053 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4055 wl1271_warning("Set slot time failed %d", ret);
4060 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4061 if (bss_conf->use_short_preamble)
4062 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4064 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4067 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4068 if (bss_conf->use_cts_prot)
4069 ret = wl1271_acx_cts_protect(wl, wlvif,
4072 ret = wl1271_acx_cts_protect(wl, wlvif,
4073 CTSPROTECT_DISABLE);
4075 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Build and upload the beacon template from mac80211's beacon skb, and
 * unless usermode set an explicit probe response, derive the probe
 * response template from the beacon (strip TIM and P2P IEs, rewrite
 * the frame control to PROBE_RESP). Also refreshes wlvif->ssid and
 * wmm_enabled from the beacon contents.
 */
4084 static int wlcore_set_beacon_template(struct wl1271 *wl,
4085 struct ieee80211_vif *vif,
4088 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4089 struct ieee80211_hdr *hdr;
4092 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4093 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4101 wl1271_debug(DEBUG_MASTER, "beacon updated");
4103 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4105 dev_kfree_skb(beacon);
4108 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
/* AP and non-AP roles use different fw template slots */
4109 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4111 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4116 dev_kfree_skb(beacon);
/* WMM is enabled iff the beacon carries the Microsoft WMM vendor IE */
4120 wlvif->wmm_enabled =
4121 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4122 WLAN_OUI_TYPE_MICROSOFT_WMM,
4123 beacon->data + ieoffset,
4124 beacon->len - ieoffset);
4127 * In case we already have a probe-resp beacon set explicitly
4128 * by usermode, don't use the beacon data.
4130 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4133 /* remove TIM ie from probe response */
4134 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4137 * remove p2p ie from probe response.
4138 * the fw responds to probe requests that don't include
4139 * the p2p ie. probe requests with p2p ie will be passed,
4140 * and will be responded by the supplicant (the spec
4141 * forbids including the p2p ie when responding to probe
4142 * requests that didn't include it).
4144 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4145 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon frame into a probe response frame */
4147 hdr = (struct ieee80211_hdr *) beacon->data;
4148 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4149 IEEE80211_STYPE_PROBE_RESP);
4151 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4156 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4157 CMD_TEMPL_PROBE_RESPONSE,
4162 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the AP probe response template, re-upload the beacon
 * template, and restart the DFS master if beaconing was disabled.
 */
4170 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4171 struct ieee80211_vif *vif,
4172 struct ieee80211_bss_conf *bss_conf,
4175 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4176 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4179 if (changed & BSS_CHANGED_BEACON_INT) {
4180 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4181 bss_conf->beacon_int);
4183 wlvif->beacon_int = bss_conf->beacon_int;
4186 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4187 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4189 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4192 if (changed & BSS_CHANGED_BEACON) {
4193 ret = wlcore_set_beacon_template(wl, vif, is_ap);
/* DFS master needs a restart once beaconing resumes */
4197 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4199 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4206 wl1271_error("beacon info change failed: %d", ret);
4210 /* AP mode changes */
/*
 * Handle bss_conf changes for an AP vif: basic-rate policy updates
 * (with template re-init), beacon changes, starting/stopping the AP
 * role on BSS_CHANGED_BEACON_ENABLED, ERP settings, and HT operation
 * mode updates.
 */
4211 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4212 struct ieee80211_vif *vif,
4213 struct ieee80211_bss_conf *bss_conf,
4216 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4219 if (changed & BSS_CHANGED_BASIC_RATES) {
4220 u32 rates = bss_conf->basic_rates;
4222 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4224 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4225 wlvif->basic_rate_set);
/* rate policies changed, so the templates must be rebuilt too */
4227 ret = wl1271_init_ap_rates(wl, wlvif);
4229 wl1271_error("AP rate policy change failed %d", ret);
4233 ret = wl1271_ap_init_templates(wl, vif);
4237 /* No need to set probe resp template for mesh */
4238 if (!ieee80211_vif_is_mesh(vif)) {
4239 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4246 ret = wlcore_set_beacon_template(wl, vif, true);
4251 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4255 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4256 if (bss_conf->enable_beacon) {
4257 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4258 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* flush keys recorded before the AP role existed */
4262 ret = wl1271_ap_init_hwenc(wl, wlvif);
4266 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4267 wl1271_debug(DEBUG_AP, "started AP");
4270 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4272 * AP might be in ROC in case we have just
4273 * sent auth reply. handle it.
4275 if (test_bit(wlvif->role_id, wl->roc_map))
4276 wl12xx_croc(wl, wlvif->role_id);
4278 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4282 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4283 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4285 wl1271_debug(DEBUG_AP, "stopped AP");
4290 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4294 /* Handle HT information change */
4295 if ((changed & BSS_CHANGED_HT) &&
4296 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4297 ret = wl1271_acx_set_ht_information(wl, wlvif,
4298 bss_conf->ht_operation_mode);
4300 wl1271_warning("Set ht information failed %d", ret);
/*
 * Program a new BSSID for a STA vif: cache beacon interval and basic
 * rates, stop any ongoing sched_scan, update rate policies and rebuild
 * the (QoS-)NULL-data templates, then mark the vif in use.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4309 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4310 struct ieee80211_bss_conf *bss_conf,
4316 wl1271_debug(DEBUG_MAC80211,
4317 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4318 bss_conf->bssid, bss_conf->aid,
4319 bss_conf->beacon_int,
4320 bss_conf->basic_rates, sta_rate_set);
4322 wlvif->beacon_int = bss_conf->beacon_int;
4323 rates = bss_conf->basic_rates;
4324 wlvif->basic_rate_set =
4325 wl1271_tx_enabled_rates_get(wl, rates,
4328 wl1271_tx_min_rate_get(wl,
4329 wlvif->basic_rate_set);
4333 wl1271_tx_enabled_rates_get(wl,
4337 /* we only support sched_scan while not connected */
4338 if (wl->sched_vif == wlvif)
4339 wl->ops->sched_scan_stop(wl, wlvif);
4341 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* templates must exist before the join so the FW can send keep-alives */
4345 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4349 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4353 wlcore_set_ssid(wl, wlvif);
4355 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates,
 * stop the STA role if it was running, and clear the in-use flag.
 */
4360 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4364 /* revert back to minimum rates for the current band */
4365 wl1271_set_band_rate(wl, wlvif);
4366 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4368 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4372 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4373 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4374 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4379 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4382 /* STA/IBSS mode changes */
/*
 * bss_info_changed handler for STA/IBSS vifs. Processes (in order):
 * beacon info, IBSS join/leave, idle, CQM thresholds, BSSID changes,
 * join, association state, powersave, HT capabilities/information and
 * ARP filtering. Order matters: join must precede assoc/HT/ARP setup.
 * NOTE(review): elided view — a significant number of lines are missing
 * from this body; comments are limited to the visible code.
 */
4383 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4384 struct ieee80211_vif *vif,
4385 struct ieee80211_bss_conf *bss_conf,
4388 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4389 bool do_join = false;
4390 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4391 bool ibss_joined = false;
4392 u32 sta_rate_set = 0;
4394 struct ieee80211_sta *sta;
4395 bool sta_exists = false;
4396 struct ieee80211_sta_ht_cap sta_ht_cap;
4399 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4405 if (changed & BSS_CHANGED_IBSS) {
4406 if (bss_conf->ibss_joined) {
4407 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* leaving the IBSS: drop assoc state and stop the STA role */
4410 wlcore_unset_assoc(wl, wlvif);
4411 wl12xx_cmd_role_stop_sta(wl, wlvif);
4415 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4418 /* Need to update the SSID (for filtering etc) */
4419 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4422 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4423 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4424 bss_conf->enable_beacon ? "enabled" : "disabled");
4429 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4430 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4432 if (changed & BSS_CHANGED_CQM) {
4433 bool enable = false;
/* a zero threshold means CQM monitoring is disabled */
4434 if (bss_conf->cqm_rssi_thold)
4436 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4437 bss_conf->cqm_rssi_thold,
4438 bss_conf->cqm_rssi_hyst);
4441 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4444 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4445 BSS_CHANGED_ASSOC)) {
4447 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4449 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4451 /* save the supp_rates of the ap */
4452 sta_rate_set = sta->supp_rates[wlvif->band];
4453 if (sta->ht_cap.ht_supported)
4455 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4456 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4457 sta_ht_cap = sta->ht_cap;
4464 if (changed & BSS_CHANGED_BSSID) {
4465 if (!is_zero_ether_addr(bss_conf->bssid)) {
4466 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4471 /* Need to update the BSSID (for filtering etc) */
4474 ret = wlcore_clear_bssid(wl, wlvif);
4480 if (changed & BSS_CHANGED_IBSS) {
4481 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4482 bss_conf->ibss_joined);
4484 if (bss_conf->ibss_joined) {
4485 u32 rates = bss_conf->basic_rates;
4486 wlvif->basic_rate_set =
4487 wl1271_tx_enabled_rates_get(wl, rates,
4490 wl1271_tx_min_rate_get(wl,
4491 wlvif->basic_rate_set);
4493 /* by default, use 11b + OFDM rates */
4494 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4495 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4501 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4502 /* enable beacon filtering */
4503 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4508 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4513 ret = wlcore_join(wl, wlvif);
4515 wl1271_warning("cmd join failed %d", ret);
4520 if (changed & BSS_CHANGED_ASSOC) {
4521 if (bss_conf->assoc) {
4522 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* mac80211 may have authorized us before the assoc notification */
4527 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4528 wl12xx_set_authorized(wl, wlvif);
4530 wlcore_unset_assoc(wl, wlvif);
4534 if (changed & BSS_CHANGED_PS) {
4535 if ((bss_conf->ps) &&
4536 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4537 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* conf.conn.forced_ps selects full PSM instead of auto mode */
4541 if (wl->conf.conn.forced_ps) {
4542 ps_mode = STATION_POWER_SAVE_MODE;
4543 ps_mode_str = "forced";
4545 ps_mode = STATION_AUTO_PS_MODE;
4546 ps_mode_str = "auto";
4549 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4551 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4553 wl1271_warning("enter %s ps failed %d",
4555 } else if (!bss_conf->ps &&
4556 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4557 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4559 ret = wl1271_ps_set_mode(wl, wlvif,
4560 STATION_ACTIVE_MODE);
4562 wl1271_warning("exit auto ps failed %d", ret);
4566 /* Handle new association with HT. Do this after join. */
4569 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4571 ret = wlcore_hw_set_peer_cap(wl,
4577 wl1271_warning("Set ht cap failed %d", ret);
4583 ret = wl1271_acx_set_ht_information(wl, wlvif,
4584 bss_conf->ht_operation_mode);
4586 wl1271_warning("Set ht information failed %d",
4593 /* Handle arp filtering. Done after join. */
4594 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4595 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4596 __be32 addr = bss_conf->arp_addr_list[0];
4597 wlvif->sta.qos = bss_conf->qos;
4598 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
/* hardware ARP offload only supports a single IPv4 address */
4600 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4601 wlvif->ip_addr = addr;
4603 * The template should have been configured only upon
4604 * association. however, it seems that the correct ip
4605 * isn't being set (when sending), so we have to
4606 * reconfigure the template upon every ip change.
4608 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4610 wl1271_warning("build arp rsp failed: %d", ret);
4614 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4615 (ACX_ARP_FILTER_ARP_FILTERING |
4616 ACX_ARP_FILTER_AUTO_ARP),
4620 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback. Performs locking and runtime-PM
 * bookkeeping, handles TX power changes, then dispatches to the AP- or
 * STA-specific handler based on the vif's BSS type.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4631 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4632 struct ieee80211_vif *vif,
4633 struct ieee80211_bss_conf *bss_conf,
4636 struct wl1271 *wl = hw->priv;
4637 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4638 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4641 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4642 wlvif->role_id, (int)changed);
4645 * make sure to cancel pending disconnections if our association
4648 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4649 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush queued frames before beaconing is turned off */
4651 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4652 !bss_conf->enable_beacon)
4653 wl1271_tx_flush(wl);
4655 mutex_lock(&wl->mutex);
4657 if (unlikely(wl->state != WLCORE_STATE_ON))
4660 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
/* wake the chip; on failure drop the usage count taken by get_sync */
4663 ret = pm_runtime_get_sync(wl->dev);
4665 pm_runtime_put_noidle(wl->dev);
4669 if ((changed & BSS_CHANGED_TXPOWER) &&
4670 bss_conf->txpower != wlvif->power_level) {
4672 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4676 wlvif->power_level = bss_conf->txpower;
4680 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4682 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4684 pm_runtime_mark_last_busy(wl->dev);
4685 pm_runtime_put_autosuspend(wl->dev);
4688 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback: wlcore keeps no per-context state,
 * so this only logs the new channel context.
 */
4691 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4692 struct ieee80211_chanctx_conf *ctx)
4694 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4695 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4696 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback: nothing to tear down, log only.
 */
4700 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4701 struct ieee80211_chanctx_conf *ctx)
4703 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4704 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4705 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback. For every AP vif bound to this
 * context, start radar detection (CAC) when the RADAR flag is newly
 * set on a usable DFS channel.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4708 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4709 struct ieee80211_chanctx_conf *ctx,
4712 struct wl1271 *wl = hw->priv;
4713 struct wl12xx_vif *wlvif;
4715 int channel = ieee80211_frequency_to_channel(
4716 ctx->def.chan->center_freq);
4718 wl1271_debug(DEBUG_MAC80211,
4719 "mac80211 change chanctx %d (type %d) changed 0x%x",
4720 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4722 mutex_lock(&wl->mutex);
4724 ret = pm_runtime_get_sync(wl->dev);
4726 pm_runtime_put_noidle(wl->dev);
4730 wl12xx_for_each_wlvif(wl, wlvif) {
4731 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs that are bound to a different channel context */
4734 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4740 /* start radar if needed */
4741 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4742 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4743 ctx->radar_enabled && !wlvif->radar_enabled &&
4744 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4745 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4746 wlcore_hw_set_cac(wl, wlvif, true);
4747 wlvif->radar_enabled = true;
4751 pm_runtime_mark_last_busy(wl->dev);
4752 pm_runtime_put_autosuspend(wl->dev);
4754 mutex_unlock(&wl->mutex);
/*
 * mac80211 assign_vif_chanctx callback: bind a vif to a channel
 * context. Records band/channel/channel-type on the vif, refreshes the
 * default band rates, and starts radar detection on usable DFS
 * channels.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4757 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4758 struct ieee80211_vif *vif,
4759 struct ieee80211_chanctx_conf *ctx)
4761 struct wl1271 *wl = hw->priv;
4762 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4763 int channel = ieee80211_frequency_to_channel(
4764 ctx->def.chan->center_freq);
4767 wl1271_debug(DEBUG_MAC80211,
4768 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4769 wlvif->role_id, channel,
4770 cfg80211_get_chandef_type(&ctx->def),
4771 ctx->radar_enabled, ctx->def.chan->dfs_state);
4773 mutex_lock(&wl->mutex);
4775 if (unlikely(wl->state != WLCORE_STATE_ON))
4778 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4781 ret = pm_runtime_get_sync(wl->dev);
4783 pm_runtime_put_noidle(wl->dev);
/* remember where this vif now operates */
4787 wlvif->band = ctx->def.chan->band;
4788 wlvif->channel = channel;
4789 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4791 /* update default rates according to the band */
4792 wl1271_set_band_rate(wl, wlvif);
4794 if (ctx->radar_enabled &&
4795 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4796 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4797 wlcore_hw_set_cac(wl, wlvif, true);
4798 wlvif->radar_enabled = true;
4801 pm_runtime_mark_last_busy(wl->dev);
4802 pm_runtime_put_autosuspend(wl->dev);
4804 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: flush pending TX, then stop
 * radar detection (CAC) if it was running for this vif.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4809 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4810 struct ieee80211_vif *vif,
4811 struct ieee80211_chanctx_conf *ctx)
4813 struct wl1271 *wl = hw->priv;
4814 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4817 wl1271_debug(DEBUG_MAC80211,
4818 "mac80211 unassign chanctx (role %d) %d (type %d)",
4820 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4821 cfg80211_get_chandef_type(&ctx->def));
/* flush outside the mutex: wl1271_tx_flush takes its own locks */
4823 wl1271_tx_flush(wl);
4825 mutex_lock(&wl->mutex);
4827 if (unlikely(wl->state != WLCORE_STATE_ON))
4830 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4833 ret = pm_runtime_get_sync(wl->dev);
4835 pm_runtime_put_noidle(wl->dev);
4839 if (wlvif->radar_enabled) {
4840 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4841 wlcore_hw_set_cac(wl, wlvif, false);
4842 wlvif->radar_enabled = false;
4845 pm_runtime_mark_last_busy(wl->dev);
4846 pm_runtime_put_autosuspend(wl->dev);
4848 mutex_unlock(&wl->mutex);
/*
 * Move a single AP vif to a new channel context: stop CAC on the old
 * channel, update band/channel/type, and restart CAC if the new
 * context has radar enabled. Caller holds wl->mutex and has the FW
 * awake. Only AP vifs are supported (WARN otherwise), and beaconing
 * is expected to already be disabled.
 */
4851 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4852 struct wl12xx_vif *wlvif,
4853 struct ieee80211_chanctx_conf *new_ctx)
4855 int channel = ieee80211_frequency_to_channel(
4856 new_ctx->def.chan->center_freq);
4858 wl1271_debug(DEBUG_MAC80211,
4859 "switch vif (role %d) %d -> %d chan_type: %d",
4860 wlvif->role_id, wlvif->channel, channel,
4861 cfg80211_get_chandef_type(&new_ctx->def));
4863 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4866 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4868 if (wlvif->radar_enabled) {
4869 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4870 wlcore_hw_set_cac(wl, wlvif, false);
4871 wlvif->radar_enabled = false;
4874 wlvif->band = new_ctx->def.chan->band;
4875 wlvif->channel = channel;
4876 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4878 /* start radar if needed */
4879 if (new_ctx->radar_enabled) {
4880 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4881 wlcore_hw_set_cac(wl, wlvif, true);
4882 wlvif->radar_enabled = true;
/*
 * mac80211 switch_vif_chanctx callback: switch each listed vif to its
 * new channel context via __wlcore_switch_vif_chan() under wl->mutex
 * with the FW awake.
 * NOTE(review): elided view — some lines of this body are missing.
 */
wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4890 struct ieee80211_vif_chanctx_switch *vifs,
4892 enum ieee80211_chanctx_switch_mode mode)
4894 struct wl1271 *wl = hw->priv;
4897 wl1271_debug(DEBUG_MAC80211,
4898 "mac80211 switch chanctx n_vifs %d mode %d",
4901 mutex_lock(&wl->mutex);
4903 ret = pm_runtime_get_sync(wl->dev);
4905 pm_runtime_put_noidle(wl->dev);
4909 for (i = 0; i < n_vifs; i++) {
4910 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4912 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4917 pm_runtime_mark_last_busy(wl->dev);
4918 pm_runtime_put_autosuspend(wl->dev);
4920 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx callback: program EDCA (AC) and TID parameters for
 * one TX queue. P2P management vifs are skipped.
 * NOTE(review): elided view — some lines of this body are missing.
 */
4925 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4926 struct ieee80211_vif *vif, u16 queue,
4927 const struct ieee80211_tx_queue_params *params)
4929 struct wl1271 *wl = hw->priv;
4930 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4934 if (wlcore_is_p2p_mgmt(wlvif))
4937 mutex_lock(&wl->mutex);
4939 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* choose U-APSD trigger vs. legacy PS scheme for this queue */
4942 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4944 ps_scheme = CONF_PS_SCHEME_LEGACY;
4946 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4949 ret = pm_runtime_get_sync(wl->dev);
4951 pm_runtime_put_noidle(wl->dev);
4956 * the txop is confed in units of 32us by the mac80211,
 * hence the << 5 conversion below
4959 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4960 params->cw_min, params->cw_max,
4961 params->aifs, params->txop << 5);
4965 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4966 CONF_CHANNEL_TYPE_EDCF,
4967 wl1271_tx_get_queue(queue),
4968 ps_scheme, CONF_ACK_POLICY_LEGACY,
4972 pm_runtime_mark_last_busy(wl->dev);
4973 pm_runtime_put_autosuspend(wl->dev);
4976 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the current TSF from the firmware.
 * Returns ULLONG_MAX when the value could not be read (chip off or
 * ACX query failure).
 */
4981 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4982 struct ieee80211_vif *vif)
4985 struct wl1271 *wl = hw->priv;
4986 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4987 u64 mactime = ULLONG_MAX;
4990 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4992 mutex_lock(&wl->mutex);
4994 if (unlikely(wl->state != WLCORE_STATE_ON))
4997 ret = pm_runtime_get_sync(wl->dev);
4999 pm_runtime_put_noidle(wl->dev);
5003 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5008 pm_runtime_mark_last_busy(wl->dev);
5009 pm_runtime_put_autosuspend(wl->dev);
5012 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the currently configured
 * channel (no per-channel statistics from the firmware).
 */
5016 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5017 struct survey_info *survey)
5019 struct ieee80211_conf *conf = &hw->conf;
5024 survey->channel = conf->chandef.chan;
/*
 * Allocate an HLID (host link ID) for a new AP-mode station, enforce
 * the max-stations limit, and register the link (address, restored
 * security sequence number) in wl->links.
 */
5029 static int wl1271_allocate_sta(struct wl1271 *wl,
5030 struct wl12xx_vif *wlvif,
5031 struct ieee80211_sta *sta)
5033 struct wl1271_station *wl_sta;
5037 if (wl->active_sta_count >= wl->max_ap_stations) {
5038 wl1271_warning("could not allocate HLID - too much stations");
5042 wl_sta = (struct wl1271_station *)sta->drv_priv;
5043 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5045 wl1271_warning("could not allocate HLID - too many links");
5049 /* use the previous security seq, if this is a recovery/resume */
5050 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5052 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5053 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5054 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear the per-vif and global PS
 * maps, save the last packet sequence number for recovery/suspend,
 * free the link, and rearm the TX watchdog when no stations remain.
 */
5058 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5060 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5063 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5064 __clear_bit(hlid, &wl->ap_ps_map);
5065 __clear_bit(hlid, &wl->ap_fw_ps_map);
5068 * save the last used PN in the private part of iee80211_sta,
5069 * in case of recovery/suspend
5071 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5073 wl12xx_free_link(wl, wlvif, &hlid);
5074 wl->active_sta_count--;
5077 * rearm the tx watchdog when the last STA is freed - give the FW a
5078 * chance to return STA-buffered packets before complaining.
5080 if (wl->active_sta_count == 0)
5081 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate an HLID, then register the peer
 * with the firmware. The HLID is freed again if the FW command fails.
 */
5084 static int wl12xx_sta_add(struct wl1271 *wl,
5085 struct wl12xx_vif *wlvif,
5086 struct ieee80211_sta *sta)
5088 struct wl1271_station *wl_sta;
5092 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5094 ret = wl1271_allocate_sta(wl, wlvif, sta);
5098 wl_sta = (struct wl1271_station *)sta->drv_priv;
5099 hlid = wl_sta->hlid;
5101 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the allocation if the firmware rejected the peer */
5103 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station in AP mode: tell the firmware to drop the peer,
 * then free its HLID. WARNs if the HLID was not actually allocated.
 */
5108 static int wl12xx_sta_remove(struct wl1271 *wl,
5109 struct wl12xx_vif *wlvif,
5110 struct ieee80211_sta *sta)
5112 struct wl1271_station *wl_sta;
5115 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5117 wl_sta = (struct wl1271_station *)sta->drv_priv;
5119 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5122 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5126 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no other
 * role currently holds a ROC (the roc_map must be empty).
 */
5130 static void wlcore_roc_if_possible(struct wl1271 *wl,
5131 struct wl12xx_vif *wlvif)
/* bail if any role already has an active ROC */
5133 if (find_first_bit(wl->roc_map,
5134 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5137 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5140 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5144 * when wl_sta is NULL, we treat this call as if coming from a
5145 * pending auth reply.
5146 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between auth and authorize)
 * and keep a ROC active while any exist. in_conn=true marks the start
 * of a connection attempt (or a pending auth reply when wl_sta is
 * NULL); in_conn=false marks its end. The ROC is released once no
 * connecting stations and no pending auth reply remain.
 */
5149 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5150 struct wl1271_station *wl_sta, bool in_conn)
5153 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first connecting entity: grab a ROC if nobody else holds one */
5156 if (!wlvif->ap_pending_auth_reply &&
5157 !wlvif->inconn_count)
5158 wlcore_roc_if_possible(wl, wlvif);
5161 wl_sta->in_connection = true;
5162 wlvif->inconn_count++;
5164 wlvif->ap_pending_auth_reply = true;
5167 if (wl_sta && !wl_sta->in_connection)
5170 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5173 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5177 wl_sta->in_connection = false;
5178 wlvif->inconn_count--;
5180 wlvif->ap_pending_auth_reply = false;
/* last connecting entity gone: release the ROC we took */
5183 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5184 test_bit(wlvif->role_id, wl->roc_map))
5185 wl12xx_croc(wl, wlvif->role_id);
/*
 * Apply a mac80211 station state transition to the firmware. Handles
 * AP-side add/remove/authorize, STA-side authorize/de-authorize, and
 * saving/restoring the security sequence number across disassoc/assoc
 * (suspend/resume), plus ROC bookkeeping around connection attempts.
 * NOTE(review): elided view — the is_ap/is_sta guards on several of the
 * transition conditions are on lines not visible here.
 */
5189 static int wl12xx_update_sta_state(struct wl1271 *wl,
5190 struct wl12xx_vif *wlvif,
5191 struct ieee80211_sta *sta,
5192 enum ieee80211_sta_state old_state,
5193 enum ieee80211_sta_state new_state)
5195 struct wl1271_station *wl_sta;
5196 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5197 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5200 wl_sta = (struct wl1271_station *)sta->drv_priv;
5202 /* Add station (AP mode) */
5204 old_state == IEEE80211_STA_NOTEXIST &&
5205 new_state == IEEE80211_STA_NONE) {
5206 ret = wl12xx_sta_add(wl, wlvif, sta);
5210 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5213 /* Remove station (AP mode) */
5215 old_state == IEEE80211_STA_NONE &&
5216 new_state == IEEE80211_STA_NOTEXIST) {
5218 wl12xx_sta_remove(wl, wlvif, sta);
5220 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5223 /* Authorize station (AP mode) */
5225 new_state == IEEE80211_STA_AUTHORIZED) {
5226 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5230 /* reconfigure rates */
5231 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5235 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5240 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5243 /* Authorize station */
5245 new_state == IEEE80211_STA_AUTHORIZED) {
5246 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5247 ret = wl12xx_set_authorized(wl, wlvif);
5253 old_state == IEEE80211_STA_AUTHORIZED &&
5254 new_state == IEEE80211_STA_ASSOC) {
5255 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5256 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5259 /* save seq number on disassoc (suspend) */
5261 old_state == IEEE80211_STA_ASSOC &&
5262 new_state == IEEE80211_STA_AUTH) {
5263 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5264 wlvif->total_freed_pkts = 0;
5267 /* restore seq number on assoc (resume) */
5269 old_state == IEEE80211_STA_AUTH &&
5270 new_state == IEEE80211_STA_ASSOC) {
5271 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5274 /* clear ROCs on failure or authorization */
5276 (new_state == IEEE80211_STA_AUTHORIZED ||
5277 new_state == IEEE80211_STA_NOTEXIST)) {
5278 if (test_bit(wlvif->role_id, wl->roc_map))
5279 wl12xx_croc(wl, wlvif->role_id);
5283 old_state == IEEE80211_STA_NOTEXIST &&
5284 new_state == IEEE80211_STA_NONE) {
/* no ROC held by anyone — take one for this role */
5285 if (find_first_bit(wl->roc_map,
5286 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5287 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5288 wl12xx_roc(wl, wlvif, wlvif->role_id,
5289 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: wraps wl12xx_update_sta_state() with
 * locking and runtime-PM. On error during a downward transition the
 * error is swallowed (mac80211 must not be blocked from tearing a
 * station down).
 */
5295 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5296 struct ieee80211_vif *vif,
5297 struct ieee80211_sta *sta,
5298 enum ieee80211_sta_state old_state,
5299 enum ieee80211_sta_state new_state)
5301 struct wl1271 *wl = hw->priv;
5302 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5305 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5306 sta->aid, old_state, new_state);
5308 mutex_lock(&wl->mutex);
5310 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5315 ret = pm_runtime_get_sync(wl->dev);
5317 pm_runtime_put_noidle(wl->dev);
5321 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5323 pm_runtime_mark_last_busy(wl->dev);
5324 pm_runtime_put_autosuspend(wl->dev);
5326 mutex_unlock(&wl->mutex);
/* never fail a downward state transition */
5327 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * firmware. RX_START/RX_STOP program the BA receiver session via ACX
 * and track active TIDs in the per-link ba_bitmap plus a global
 * session counter. All TX AMPDU actions are no-ops here because the
 * firmware manages BA initiator sessions on its own.
 *
 * Fix: the original line read "u16 *ssn = ¶ms->ssn;" — an HTML-entity
 * mojibake ("&para" rendered as the pilcrow sign) corrupting
 * "&params->ssn". Restored the address-of expression.
 * NOTE(review): elided view — some lines of this body are missing;
 * all other visible tokens are kept unchanged.
 */
5332 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5333 struct ieee80211_vif *vif,
5334 struct ieee80211_ampdu_params *params)
5336 struct wl1271 *wl = hw->priv;
5337 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5339 u8 hlid, *ba_bitmap;
5340 struct ieee80211_sta *sta = params->sta;
5341 enum ieee80211_ampdu_mlme_action action = params->action;
5342 u16 tid = params->tid;
5343 u16 *ssn = &params->ssn;
5345 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5348 /* sanity check - the fields in FW are only 8bits wide */
5349 if (WARN_ON(tid > 0xFF))
5352 mutex_lock(&wl->mutex);
5354 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the link (HLID) the BA session belongs to */
5359 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5360 hlid = wlvif->sta.hlid;
5361 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5362 struct wl1271_station *wl_sta;
5364 wl_sta = (struct wl1271_station *)sta->drv_priv;
5365 hlid = wl_sta->hlid;
5371 ba_bitmap = &wl->links[hlid].ba_bitmap;
5373 ret = pm_runtime_get_sync(wl->dev);
5375 pm_runtime_put_noidle(wl->dev);
5379 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5383 case IEEE80211_AMPDU_RX_START:
5384 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5389 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5391 wl1271_error("exceeded max RX BA sessions");
5395 if (*ba_bitmap & BIT(tid)) {
5397 wl1271_error("cannot enable RX BA session on active "
5402 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5407 *ba_bitmap |= BIT(tid);
5408 wl->ba_rx_session_count++;
5412 case IEEE80211_AMPDU_RX_STOP:
5413 if (!(*ba_bitmap & BIT(tid))) {
5415 * this happens on reconfig - so only output a debug
5416 * message for now, and don't fail the function.
5418 wl1271_debug(DEBUG_MAC80211,
5419 "no active RX BA session on tid: %d",
5425 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5428 *ba_bitmap &= ~BIT(tid);
5429 wl->ba_rx_session_count--;
5434 * The firmware manages BA initiator sessions independently,
5435 * so all TX AMPDU actions intentionally fall through to a no-op.
5437 case IEEE80211_AMPDU_TX_START:
5438 case IEEE80211_AMPDU_TX_STOP_CONT:
5439 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5440 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5441 case IEEE80211_AMPDU_TX_OPERATIONAL:
5446 wl1271_error("Incorrect ampdu action id=%x\n", action);
5450 pm_runtime_mark_last_busy(wl->dev);
5451 pm_runtime_put_autosuspend(wl->dev);
5454 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy rate
 * masks, and for a not-yet-associated STA vif immediately re-program
 * the band rates and rate policies in the firmware.
 * NOTE(review): elided view — some lines of this body are missing.
 */
5459 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5460 struct ieee80211_vif *vif,
5461 const struct cfg80211_bitrate_mask *mask)
5463 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5464 struct wl1271 *wl = hw->priv;
5467 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5468 mask->control[NL80211_BAND_2GHZ].legacy,
5469 mask->control[NL80211_BAND_5GHZ].legacy);
5471 mutex_lock(&wl->mutex);
/* always cache the masks; they are applied on the next join otherwise */
5473 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5474 wlvif->bitrate_masks[i] =
5475 wl1271_tx_enabled_rates_get(wl,
5476 mask->control[i].legacy,
5479 if (unlikely(wl->state != WLCORE_STATE_ON))
5482 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5483 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5485 ret = pm_runtime_get_sync(wl->dev);
5487 pm_runtime_put_noidle(wl->dev);
5491 wl1271_set_band_rate(wl, wlvif);
5493 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5494 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5496 pm_runtime_mark_last_busy(wl->dev);
5497 pm_runtime_put_autosuspend(wl->dev);
5500 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback (STA side): flush TX, hand the
 * switch request to the lower driver, and schedule a delayed work that
 * reports failure if the switch has not completed ~5s after the
 * expected switch time. If the chip is off while still "associated",
 * report the switch as failed immediately.
 * NOTE(review): elided view — some lines of this body are missing.
 */
5505 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5506 struct ieee80211_vif *vif,
5507 struct ieee80211_channel_switch *ch_switch)
5509 struct wl1271 *wl = hw->priv;
5510 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5513 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5515 wl1271_tx_flush(wl);
5517 mutex_lock(&wl->mutex);
5519 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5520 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5521 ieee80211_chswitch_done(vif, false);
5523 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5527 ret = pm_runtime_get_sync(wl->dev);
5529 pm_runtime_put_noidle(wl->dev);
5533 /* TODO: change mac80211 to pass vif as param */
5535 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5536 unsigned long delay_usec;
5538 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5542 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5544 /* indicate failure 5 seconds after channel switch time */
5545 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5547 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5548 usecs_to_jiffies(delay_usec) +
5549 msecs_to_jiffies(5000));
5553 pm_runtime_mark_last_busy(wl->dev);
5554 pm_runtime_put_autosuspend(wl->dev);
5557 mutex_unlock(&wl->mutex);
/*
 * Fetch the current beacon for a vif and locate information element
 * 'eid' in its variable part. Returns a pointer into the beacon skb's
 * data, or NULL if the IE is absent.
 * NOTE(review): elided view — the skb handling lines (NULL check /
 * free) are not visible here.
 */
5560 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5561 struct wl12xx_vif *wlvif,
5564 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5565 struct sk_buff *beacon =
5566 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5571 return cfg80211_find_ie(eid,
5572 beacon->data + ieoffset,
5573 beacon->len - ieoffset);
/*
 * Read the CSA (channel switch announcement) countdown from the
 * Channel Switch IE of the vif's current beacon into *csa_count.
 */
5576 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5580 const struct ieee80211_channel_sw_ie *ie_csa;
5582 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
/* skip the 2-byte IE header (id + length) to reach the payload */
5586 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5587 *csa_count = ie_csa->count;
/*
 * mac80211 channel_switch_beacon callback (AP side): build a channel
 * switch request using the CSA count taken from the current beacon and
 * hand it to the lower driver.
 * NOTE(review): elided view — some lines of this body are missing.
 */
5592 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5593 struct ieee80211_vif *vif,
5594 struct cfg80211_chan_def *chandef)
5596 struct wl1271 *wl = hw->priv;
5597 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5598 struct ieee80211_channel_switch ch_switch = {
5600 .chandef = *chandef,
5604 wl1271_debug(DEBUG_MAC80211,
5605 "mac80211 channel switch beacon (role %d)",
/* take the CSA countdown from the beacon we are already sending */
5608 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5610 wl1271_error("error getting beacon (for CSA counter)");
5614 mutex_lock(&wl->mutex);
5616 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5621 ret = pm_runtime_get_sync(wl->dev);
5623 pm_runtime_put_noidle(wl->dev);
5627 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5631 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5634 pm_runtime_mark_last_busy(wl->dev);
5635 pm_runtime_put_autosuspend(wl->dev);
5637 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: wlcore always flushes everything; the
 * queues/drop arguments are ignored.
 */
5640 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5641 u32 queues, bool drop)
5643 struct wl1271 *wl = hw->priv;
5645 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work after 'duration'.
 * Returns -EBUSY if another ROC is already active (only one at a
 * time is supported).
 * NOTE(review): elided view — some lines of this body are missing.
 */
5648 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5649 struct ieee80211_vif *vif,
5650 struct ieee80211_channel *chan,
5652 enum ieee80211_roc_type type)
5654 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5655 struct wl1271 *wl = hw->priv;
5656 int channel, active_roc, ret = 0;
5658 channel = ieee80211_frequency_to_channel(chan->center_freq);
5660 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5661 channel, wlvif->role_id);
5663 mutex_lock(&wl->mutex);
5665 if (unlikely(wl->state != WLCORE_STATE_ON))
5668 /* return EBUSY if we can't ROC right now */
5669 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5670 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5671 wl1271_warning("active roc on role %d", active_roc);
5676 ret = pm_runtime_get_sync(wl->dev);
5678 pm_runtime_put_noidle(wl->dev);
5682 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* auto-complete the ROC when the requested duration elapses */
5687 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5688 msecs_to_jiffies(duration));
5690 pm_runtime_mark_last_busy(wl->dev);
5691 pm_runtime_put_autosuspend(wl->dev);
5693 mutex_unlock(&wl->mutex);
/*
 * Finish an active ROC: stop the device role of the ROC vif. Caller
 * holds wl->mutex and has the FW awake. Safe to call when no ROC is
 * active (returns early).
 */
5697 static int __wlcore_roc_completed(struct wl1271 *wl)
5699 struct wl12xx_vif *wlvif;
5702 /* already completed */
5703 if (unlikely(!wl->roc_vif))
5706 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5708 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5711 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): take wl->mutex, wake
 * the firmware, complete the ROC, then release both.
 */
5720 static int wlcore_roc_completed(struct wl1271 *wl)
5724 wl1271_debug(DEBUG_MAC80211, "roc complete");
5726 mutex_lock(&wl->mutex);
5728 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5733 ret = pm_runtime_get_sync(wl->dev);
5735 pm_runtime_put_noidle(wl->dev);
5739 ret = __wlcore_roc_completed(wl);
5741 pm_runtime_mark_last_busy(wl->dev);
5742 pm_runtime_put_autosuspend(wl->dev);
5744 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler fired when the ROC duration elapses: complete
 * the ROC and notify mac80211 that the remain-on-channel has expired.
 */
5749 static void wlcore_roc_complete_work(struct work_struct *work)
5751 struct delayed_work *dwork;
5755 dwork = to_delayed_work(work);
5756 wl = container_of(dwork, struct wl1271, roc_complete_work);
5758 ret = wlcore_roc_completed(wl);
5760 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel-ROC handler: flush pending TX, cancel the scheduled
 * completion work, then complete the ROC synchronously.
 */
5763 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5765 struct wl1271 *wl = hw->priv;
5767 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5770 wl1271_tx_flush(wl);
5773 * we can't just flush_work here, because it might deadlock
5774 * (as we might get called from the same workqueue)
5776 cancel_delayed_work_sync(&wl->roc_complete_work);
5777 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update handler.  Only bandwidth changes are handled;
 * since this callback runs in atomic context, the new bandwidth and HT
 * capabilities are stashed on the vif and applied from rc_update_work.
 */
5782 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5783 struct ieee80211_vif *vif,
5784 struct ieee80211_sta *sta,
5787 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5789 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5791 if (!(changed & IEEE80211_RC_BW_CHANGED))
5794 /* this callback is atomic, so schedule a new work */
5795 wlvif->rc_update_bw = sta->bandwidth;
5796 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5797 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 sta_statistics handler: wakes the chip, queries the firmware's
 * averaged RSSI via ACX, and fills sinfo->signal (dBm) on success.
 */
5800 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5801 struct ieee80211_vif *vif,
5802 struct ieee80211_sta *sta,
5803 struct station_info *sinfo)
5805 struct wl1271 *wl = hw->priv;
5806 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5810 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5812 mutex_lock(&wl->mutex);
5814 if (unlikely(wl->state != WLCORE_STATE_ON))
5817 ret = pm_runtime_get_sync(wl->dev);
5819 pm_runtime_put_noidle(wl->dev);
5823 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5827 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5828 sinfo->signal = rssi_dbm;
5831 pm_runtime_mark_last_busy(wl->dev);
5832 pm_runtime_put_autosuspend(wl->dev);
5835 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_expected_throughput handler: reports the firmware's last
 * known rate for the station's link, converted from Mbps to Kbps.
 */
5838 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5839 struct ieee80211_sta *sta)
5841 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5842 struct wl1271 *wl = hw->priv;
5843 u8 hlid = wl_sta->hlid;
5845 /* return in units of Kbps */
5846 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 tx_frames_pending handler: true if frames are queued in the
 * driver or still outstanding in firmware; false when the device is off.
 */
5849 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5851 struct wl1271 *wl = hw->priv;
5854 mutex_lock(&wl->mutex);
5856 if (unlikely(wl->state != WLCORE_STATE_ON))
5859 /* packets are considered pending if in the TX queue or the FW */
5860 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5862 mutex_unlock(&wl->mutex);
5867 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz bitrate table (CCK 1-11 Mbps + OFDM 6-54 Mbps); hw_value is the
 * CONF_HW_BIT_RATE_* firmware rate bit.  Bitrate fields are elided here.
 */
5868 static struct ieee80211_rate wl1271_rates[] = {
5870 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5871 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5873 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5874 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5875 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5877 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5878 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5879 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5881 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5882 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5883 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5885 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5886 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5888 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5889 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5891 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5892 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5894 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5895 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5897 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5898 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5900 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5901 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5903 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5904 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5906 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5907 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5910 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list (channels 1-14, 2412-2484 MHz). */
5911 static struct ieee80211_channel wl1271_channels[] = {
5912 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5913 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5914 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5915 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5917 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5918 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5919 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5920 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5921 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5922 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5923 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5924 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5925 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5928 /* can't be const, mac80211 writes to this */
/* 2.4 GHz supported-band descriptor tying channels and rates together. */
5929 static struct ieee80211_supported_band wl1271_band_2ghz = {
5930 .channels = wl1271_channels,
5931 .n_channels = ARRAY_SIZE(wl1271_channels),
5932 .bitrates = wl1271_rates,
5933 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5936 /* 5 GHz data rates for WL1273 */
/* OFDM-only table (6-54 Mbps); CCK rates do not exist on 5 GHz. */
5937 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5939 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5940 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5942 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5943 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5945 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5946 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5948 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5949 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5951 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5952 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5954 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5955 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5957 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5958 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5960 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5961 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5964 /* 5 GHz band channels for WL1273 */
/* Includes legacy Japan channels (8/12/16/34) plus UNII-1/2/2e/3. */
5965 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5966 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5967 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5968 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5988 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5989 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5990 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5991 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5992 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5993 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5994 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5995 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5996 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor tying channels and rates together. */
5999 static struct ieee80211_supported_band wl1271_band_5ghz = {
6000 .channels = wl1271_channels_5ghz,
6001 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6002 .bitrates = wl1271_rates_5ghz,
6003 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table registered via ieee80211_alloc_hw(). */
6006 static const struct ieee80211_ops wl1271_ops = {
6007 .start = wl1271_op_start,
6008 .stop = wlcore_op_stop,
6009 .add_interface = wl1271_op_add_interface,
6010 .remove_interface = wl1271_op_remove_interface,
6011 .change_interface = wl12xx_op_change_interface,
6013 .suspend = wl1271_op_suspend,
6014 .resume = wl1271_op_resume,
6016 .config = wl1271_op_config,
6017 .prepare_multicast = wl1271_op_prepare_multicast,
6018 .configure_filter = wl1271_op_configure_filter,
6020 .set_key = wlcore_op_set_key,
6021 .hw_scan = wl1271_op_hw_scan,
6022 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6023 .sched_scan_start = wl1271_op_sched_scan_start,
6024 .sched_scan_stop = wl1271_op_sched_scan_stop,
6025 .bss_info_changed = wl1271_op_bss_info_changed,
6026 .set_frag_threshold = wl1271_op_set_frag_threshold,
6027 .set_rts_threshold = wl1271_op_set_rts_threshold,
6028 .conf_tx = wl1271_op_conf_tx,
6029 .get_tsf = wl1271_op_get_tsf,
6030 .get_survey = wl1271_op_get_survey,
6031 .sta_state = wl12xx_op_sta_state,
6032 .ampdu_action = wl1271_op_ampdu_action,
6033 .tx_frames_pending = wl1271_tx_frames_pending,
6034 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6035 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6036 .channel_switch = wl12xx_op_channel_switch,
6037 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6038 .flush = wlcore_op_flush,
6039 .remain_on_channel = wlcore_op_remain_on_channel,
6040 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6041 .add_chanctx = wlcore_op_add_chanctx,
6042 .remove_chanctx = wlcore_op_remove_chanctx,
6043 .change_chanctx = wlcore_op_change_chanctx,
6044 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6045 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6046 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6047 .sta_rc_update = wlcore_op_sta_rc_update,
6048 .sta_statistics = wlcore_op_sta_statistics,
6049 .get_expected_throughput = wlcore_op_get_expected_throughput,
6050 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate code to the mac80211 rate-table index for the
 * given band, logging and rejecting out-of-range or unsupported codes.
 * (Error-path returns are elided in this view.)
 */
6054 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6060 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6061 wl1271_error("Illegal RX rate from HW: %d", rate);
6065 idx = wl->band_rate_to_idx[band][rate];
6066 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6067 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Fill wl->addresses by incrementing the NIC part of the base OUI/NIC
 * address for each supported MAC.  If the chip provides one address fewer
 * than WLCORE_NUM_MAC_ADDRESSES, the last slot is synthesized from the
 * first with the locally-administered (LAA) bit set.
 */
6074 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6078 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* incrementing the NIC must not overflow past 24 bits */
6081 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6082 wl1271_warning("NIC part of the MAC address wraps around!");
6084 for (i = 0; i < wl->num_mac_addr; i++) {
6085 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6086 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6087 wl->addresses[i].addr[2] = (u8) oui;
6088 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6089 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6090 wl->addresses[i].addr[5] = (u8) nic;
6094 /* we may be one address short at the most */
6095 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6098 * turn on the LAA bit in the first address and use it as
6101 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6102 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6103 memcpy(&wl->addresses[idx], &wl->addresses[0],
6104 sizeof(wl->addresses[0]));
/* BIT(1) in the first octet marks a locally administered address */
6106 wl->addresses[idx].addr[0] |= BIT(1);
6109 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6110 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read chip ID, PG version and (if the chip-family op exists) the fused
 * MAC address.  Fuse addresses default to 0 when unavailable.
 */
6113 static int wl12xx_get_hw_info(struct wl1271 *wl)
6117 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6121 wl->fuse_oui_addr = 0;
6122 wl->fuse_nic_addr = 0;
6124 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6128 if (wl->ops->get_mac)
6129 ret = wl->ops->get_mac(wl);
/*
 * Register the device with mac80211.  Resolves the base MAC address with
 * the following precedence: NVS-file address, then the chip fuse, then
 * (for the known dummy 0xdeadbeef NVS address with a zero fuse) a random
 * NIC with the TI OUI.  Idempotent: returns early if already registered.
 */
6135 static int wl1271_register_hw(struct wl1271 *wl)
6138 u32 oui_addr = 0, nic_addr = 0;
6139 struct platform_device *pdev = wl->pdev;
6140 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6142 if (wl->mac80211_registered)
6145 if (wl->nvs_len >= 12) {
6146 /* NOTE: The wl->nvs->nvs element must be first, in
6147 * order to simplify the casting, we assume it is at
6148 * the beginning of the wl->nvs structure.
6150 u8 *nvs_ptr = (u8 *)wl->nvs;
6153 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6155 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6158 /* if the MAC address is zeroed in the NVS derive from fuse */
6159 if (oui_addr == 0 && nic_addr == 0) {
6160 oui_addr = wl->fuse_oui_addr;
6161 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6162 nic_addr = wl->fuse_nic_addr + 1;
/* 0xdeadbeef0000 is the placeholder address shipped in default NVS files */
6165 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6166 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6167 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6168 wl1271_warning("This default nvs file can be removed from the file system");
6170 wl1271_warning("Your device performance is not optimized.");
6171 wl1271_warning("Please use the calibrator tool to configure your device.");
6174 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6175 wl1271_warning("Fuse mac address is zero. using random mac");
6176 /* Use TI oui and a random nic */
6177 oui_addr = WLCORE_TI_OUI_ADDRESS;
6178 nic_addr = get_random_int();
6180 oui_addr = wl->fuse_oui_addr;
6181 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6182 nic_addr = wl->fuse_nic_addr + 1;
6186 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6188 ret = ieee80211_register_hw(wl->hw);
6190 wl1271_error("unable to register mac80211 hw: %d", ret);
6194 wl->mac80211_registered = true;
6196 wl1271_debugfs_init(wl);
6198 wl1271_notice("loaded");
/* Tear down mac80211 registration, stopping PLT mode first if active. */
6204 static void wl1271_unregister_hw(struct wl1271 *wl)
6207 wl1271_plt_stop(wl);
6209 ieee80211_unregister_hw(wl->hw);
6210 wl->mac80211_registered = false;
/*
 * One-time mac80211/cfg80211 setup before registration: hw flags, cipher
 * suites, interface modes, scan limits, band tables (per-device copies of
 * the static 2.4/5 GHz structs), queue layout and wowlan/vendor hooks.
 */
6214 static int wl1271_init_ieee80211(struct wl1271 *wl)
6217 static const u32 cipher_suites[] = {
6218 WLAN_CIPHER_SUITE_WEP40,
6219 WLAN_CIPHER_SUITE_WEP104,
6220 WLAN_CIPHER_SUITE_TKIP,
6221 WLAN_CIPHER_SUITE_CCMP,
6222 WL1271_CIPHER_SUITE_GEM,
6225 /* The tx descriptor buffer */
6226 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6228 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6229 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6232 /* FIXME: find a proper value */
6233 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6235 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6236 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6237 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6238 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6239 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6240 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6241 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6242 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6243 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6244 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6245 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6246 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6247 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6248 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6250 wl->hw->wiphy->cipher_suites = cipher_suites;
6251 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6253 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6254 BIT(NL80211_IFTYPE_AP) |
6255 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6256 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6257 #ifdef CONFIG_MAC80211_MESH
6258 BIT(NL80211_IFTYPE_MESH_POINT) |
6260 BIT(NL80211_IFTYPE_P2P_GO);
6262 wl->hw->wiphy->max_scan_ssids = 1;
6263 wl->hw->wiphy->max_sched_scan_ssids = 16;
6264 wl->hw->wiphy->max_match_sets = 16;
6266 * Maximum length of elements in scanning probe request templates
6267 * should be the maximum length possible for a template, without
6268 * the IEEE80211 header of the template
6270 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6271 sizeof(struct ieee80211_header);
6273 wl->hw->wiphy->max_sched_scan_reqs = 1;
6274 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6275 sizeof(struct ieee80211_header);
6277 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6279 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6280 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6281 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6283 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6285 /* make sure all our channels fit in the scanned_ch bitmask */
6286 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6287 ARRAY_SIZE(wl1271_channels_5ghz) >
6288 WL1271_MAX_CHANNELS);
6290 * clear channel flags from the previous usage
6291 * and restore max_power & max_antenna_gain values.
6293 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6294 wl1271_band_2ghz.channels[i].flags = 0;
6295 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6296 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6299 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6300 wl1271_band_5ghz.channels[i].flags = 0;
6301 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6302 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6306 * We keep local copies of the band structs because we need to
6307 * modify them on a per-device basis.
6309 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6310 sizeof(wl1271_band_2ghz));
6311 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6312 &wl->ht_cap[NL80211_BAND_2GHZ],
6313 sizeof(*wl->ht_cap));
6314 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6315 sizeof(wl1271_band_5ghz));
6316 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6317 &wl->ht_cap[NL80211_BAND_5GHZ],
6318 sizeof(*wl->ht_cap));
6320 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6321 &wl->bands[NL80211_BAND_2GHZ];
6322 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6323 &wl->bands[NL80211_BAND_5GHZ];
6326 * allow 4 queues per mac address we support +
6327 * 1 cab queue per mac + one global offchannel Tx queue
6329 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6331 /* the last queue is the offchannel queue */
6332 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6333 wl->hw->max_rates = 1;
6335 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6337 /* the FW answers probe-requests in AP-mode */
6338 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6339 wl->hw->wiphy->probe_resp_offload =
6340 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6341 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6342 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6344 /* allowed interface combinations */
6345 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6346 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6348 /* register vendor commands */
6349 wlcore_set_vendor_commands(wl->hw->wiphy);
6351 SET_IEEE80211_DEV(wl->hw, wl->dev);
6353 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6354 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6356 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and zero-initialize the driver instance: the ieee80211_hw, the
 * chip-family private area, work items, per-link TX queues, the TX
 * aggregation buffer, dummy packet, FW-log page, mailbox and 32-bit
 * scratch buffer.  On any failure the goto-cleanup chain (labels elided
 * in this view) unwinds everything and ERR_PTR(ret) is returned.
 */
6361 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6364 struct ieee80211_hw *hw;
6369 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6371 wl1271_error("could not alloc ieee80211_hw");
6377 memset(wl, 0, sizeof(*wl));
6379 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6381 wl1271_error("could not alloc wl priv");
6383 goto err_priv_alloc;
6386 INIT_LIST_HEAD(&wl->wlvif_list);
6391 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6392 * we don't allocate any additional resource here, so that's fine.
6394 for (i = 0; i < NUM_TX_QUEUES; i++)
6395 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6396 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6398 skb_queue_head_init(&wl->deferred_rx_queue);
6399 skb_queue_head_init(&wl->deferred_tx_queue);
6401 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6402 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6403 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6404 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6405 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6406 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6408 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6409 if (!wl->freezable_wq) {
6416 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6417 wl->band = NL80211_BAND_2GHZ;
6418 wl->channel_type = NL80211_CHAN_NO_HT;
6420 wl->sg_enabled = true;
6421 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6422 wl->recovery_count = 0;
6425 wl->ap_fw_ps_map = 0;
6427 wl->system_hlid = WL12XX_SYSTEM_HLID;
6428 wl->active_sta_count = 0;
6429 wl->active_link_count = 0;
6432 /* The system link is always allocated */
6433 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6435 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6436 for (i = 0; i < wl->num_tx_desc; i++)
6437 wl->tx_frames[i] = NULL;
6439 spin_lock_init(&wl->wl_lock);
6441 wl->state = WLCORE_STATE_OFF;
6442 wl->fw_type = WL12XX_FW_TYPE_NONE;
6443 mutex_init(&wl->mutex);
6444 mutex_init(&wl->flush_mutex);
6445 init_completion(&wl->nvs_loading_complete);
6447 order = get_order(aggr_buf_size);
6448 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6449 if (!wl->aggr_buf) {
6453 wl->aggr_buf_size = aggr_buf_size;
6455 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6456 if (!wl->dummy_packet) {
6461 /* Allocate one page for the FW log */
6462 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6465 goto err_dummy_packet;
6468 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox is read by the device via DMA-capable memory */
6469 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6475 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6476 if (!wl->buffer_32) {
/* error unwind (labels elided): free in reverse order of allocation */
6487 free_page((unsigned long)wl->fwlog);
6490 dev_kfree_skb(wl->dummy_packet);
6493 free_pages((unsigned long)wl->aggr_buf, order);
6496 destroy_workqueue(wl->freezable_wq);
6499 wl1271_debugfs_exit(wl);
6503 ieee80211_free_hw(hw);
6507 return ERR_PTR(ret);
6509 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Free everything wlcore_alloc_hw() created, in reverse order.  Setting
 * fwlog_size to -1 first unblocks any reader waiting on the FW log.
 */
6511 int wlcore_free_hw(struct wl1271 *wl)
6513 /* Unblock any fwlog readers */
6514 mutex_lock(&wl->mutex);
6515 wl->fwlog_size = -1;
6516 mutex_unlock(&wl->mutex);
6518 wlcore_sysfs_free(wl);
6520 kfree(wl->buffer_32);
6522 free_page((unsigned long)wl->fwlog);
6523 dev_kfree_skb(wl->dummy_packet);
6524 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6526 wl1271_debugfs_exit(wl);
6530 wl->fw_type = WL12XX_FW_TYPE_NONE;
6534 kfree(wl->raw_fw_status);
6535 kfree(wl->fw_status);
6536 kfree(wl->tx_res_if);
6537 destroy_workqueue(wl->freezable_wq);
6540 ieee80211_free_hw(wl->hw);
6544 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/* Wake-on-WLAN capabilities advertised when power is kept in suspend. */
6547 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6548 .flags = WIPHY_WOWLAN_ANY,
6549 .n_patterns = WL1271_MAX_RX_FILTERS,
6550 .pattern_min_len = 1,
6551 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/* Hard-IRQ half for edge-triggered interrupts: just wake the thread. */
6555 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6557 return IRQ_WAKE_THREAD;
/*
 * Firmware-loader callback that completes device probing once the NVS
 * (calibration) file is available (fw may be NULL when none is needed):
 * copies the NVS, sets up the chip family, claims IRQ/wake-IRQ resources,
 * powers the chip up briefly to identify it, initializes mac80211 state
 * and registers the hw.  Always signals nvs_loading_complete at the end
 * so wlcore_probe()/wlcore_remove() can synchronize with it.
 */
6560 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6562 struct wl1271 *wl = context;
6563 struct platform_device *pdev = wl->pdev;
6564 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6565 struct resource *res;
6568 irq_handler_t hardirq_fn = NULL;
6571 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6573 wl1271_error("Could not allocate nvs data");
6576 wl->nvs_len = fw->size;
6577 } else if (pdev_data->family->nvs_name) {
6578 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6579 pdev_data->family->nvs_name);
6587 ret = wl->ops->setup(wl);
6591 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6593 /* adjust some runtime configuration parameters */
6594 wlcore_adjust_conf(wl);
6596 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6598 wl1271_error("Could not get IRQ resource");
6602 wl->irq = res->start;
6603 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6604 wl->if_ops = pdev_data->if_ops;
/* edge-triggered IRQs need a hardirq half to latch the event */
6606 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6607 hardirq_fn = wlcore_hardirq;
6609 wl->irq_flags |= IRQF_ONESHOT;
6611 ret = wl12xx_set_power_on(wl);
6615 ret = wl12xx_get_hw_info(wl);
6617 wl1271_error("couldn't get hw info");
6618 wl1271_power_off(wl);
6622 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6623 wl->irq_flags, pdev->name, wl);
6625 wl1271_error("interrupt configuration failed");
6626 wl1271_power_off(wl);
6631 device_init_wakeup(wl->dev, true);
6633 ret = enable_irq_wake(wl->irq);
6635 wl->irq_wake_enabled = true;
6636 if (pdev_data->pwr_in_suspend)
6637 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* optional second IRQ resource acts as a dedicated wakeup interrupt */
6640 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6642 wl->wakeirq = res->start;
6643 wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6644 ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6646 wl->wakeirq = -ENODEV;
6648 wl->wakeirq = -ENODEV;
/* chip identified; power down again until the interface is started */
6651 disable_irq(wl->irq);
6652 wl1271_power_off(wl);
6654 ret = wl->ops->identify_chip(wl);
6658 ret = wl1271_init_ieee80211(wl);
6662 ret = wl1271_register_hw(wl);
6666 ret = wlcore_sysfs_init(wl);
6670 wl->initialized = true;
/* error unwind (labels elided in this view) */
6674 wl1271_unregister_hw(wl);
6677 if (wl->wakeirq >= 0)
6678 dev_pm_clear_wake_irq(wl->dev);
6679 device_init_wakeup(wl->dev, false);
6680 free_irq(wl->irq, wl);
6686 release_firmware(fw);
6687 complete_all(&wl->nvs_loading_complete);
/*
 * Runtime-PM suspend: put the chip into ELP (extremely low power) sleep.
 * Skipped in PLT mode, when ELP is not the configured sleep mode, or when
 * any in-use vif is not yet in PS (elided lines return early for those).
 * A failed ELP write triggers firmware recovery.
 */
6690 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6692 struct wl1271 *wl = dev_get_drvdata(dev);
6693 struct wl12xx_vif *wlvif;
6696 /* We do not enter elp sleep in PLT mode */
6700 /* Nothing to do if no ELP mode requested */
6701 if (wl->sleep_auth != WL1271_PSM_ELP)
6704 wl12xx_for_each_wlvif(wl, wlvif) {
6705 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6706 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6710 wl1271_debug(DEBUG_PSM, "chip to elp");
6711 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6713 wl12xx_queue_recovery_work(wl);
6718 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
/*
 * Runtime-PM resume: wake the chip from ELP by writing the wake-up bit
 * and waiting (up to WL1271_WAKEUP_TIMEOUT ms) for the firmware's ELP
 * completion, signalled through wl->elp_compl by the IRQ path.  On
 * timeout, recovery is queued but 0 is returned so runtime PM proceeds.
 */
6723 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6725 struct wl1271 *wl = dev_get_drvdata(dev);
6726 DECLARE_COMPLETION_ONSTACK(compl);
6727 unsigned long flags;
6729 unsigned long start_time = jiffies;
6730 bool pending = false;
6731 bool recovery = false;
6733 /* Nothing to do if no ELP mode requested */
6734 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6737 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
/* publish the completion under wl_lock so the IRQ handler can see it */
6739 spin_lock_irqsave(&wl->wl_lock, flags);
6740 if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6743 wl->elp_compl = &compl;
6744 spin_unlock_irqrestore(&wl->wl_lock, flags);
6746 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6753 ret = wait_for_completion_timeout(&compl,
6754 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6756 wl1271_warning("ELP wakeup timeout!");
6758 /* Return no error for runtime PM for recovery */
6765 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6767 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6768 jiffies_to_msecs(jiffies - start_time));
/* cleanup path: detach the on-stack completion before it goes away */
6773 spin_lock_irqsave(&wl->wl_lock, flags);
6774 wl->elp_compl = NULL;
6775 spin_unlock_irqrestore(&wl->wl_lock, flags);
6778 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6779 wl12xx_queue_recovery_work(wl);
/* Runtime-PM ops installed on the device by wlcore_probe(). */
6785 static const struct dev_pm_ops wlcore_pm_ops = {
6786 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6787 wlcore_runtime_resume,
/*
 * Platform probe entry used by the chip-family drivers.  Requests the NVS
 * file asynchronously (probing continues in wlcore_nvs_cb()) or calls the
 * callback directly with NULL when the family needs no NVS, then enables
 * autosuspend-based runtime PM (50 ms delay).
 */
6791 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6793 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6794 const char *nvs_name;
6797 if (!wl->ops || !wl->ptable || !pdev_data)
6800 wl->dev = &pdev->dev;
6802 platform_set_drvdata(pdev, wl);
6804 if (pdev_data->family && pdev_data->family->nvs_name) {
6805 nvs_name = pdev_data->family->nvs_name;
6806 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6807 nvs_name, &pdev->dev, GFP_KERNEL,
6810 wl1271_error("request_firmware_nowait failed for %s: %d",
/* wake any remove() waiting even though probing failed */
6812 complete_all(&wl->nvs_loading_complete);
6815 wlcore_nvs_cb(NULL, wl);
6818 wl->dev->driver->pm = &wlcore_pm_ops;
6819 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6820 pm_runtime_use_autosuspend(wl->dev);
6821 pm_runtime_enable(wl->dev);
/*
 * Platform remove: wake the device, wait for any in-flight NVS load,
 * release wake IRQs, unregister from mac80211, disable runtime PM and
 * free the interrupt.  Skips teardown if probing never completed.
 */
6827 int wlcore_remove(struct platform_device *pdev)
6829 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6830 struct wl1271 *wl = platform_get_drvdata(pdev);
6833 error = pm_runtime_get_sync(wl->dev);
6835 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6837 wl->dev->driver->pm = NULL;
/* wlcore_nvs_cb() may still be running; synchronize before teardown */
6839 if (pdev_data->family && pdev_data->family->nvs_name)
6840 wait_for_completion(&wl->nvs_loading_complete);
6841 if (!wl->initialized)
6844 if (wl->wakeirq >= 0) {
6845 dev_pm_clear_wake_irq(wl->dev);
6846 wl->wakeirq = -ENODEV;
6849 device_init_wakeup(wl->dev, false);
6851 if (wl->irq_wake_enabled)
6852 disable_irq_wake(wl->irq);
6854 wl1271_unregister_hw(wl);
6856 pm_runtime_put_sync(wl->dev);
6857 pm_runtime_dont_use_autosuspend(wl->dev);
6858 pm_runtime_disable(wl->dev);
6860 free_irq(wl->irq, wl);
6865 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and metadata; debug_level is shared with the
 * chip-family modules via the exported symbol. */
6867 u32 wl12xx_debug_level = DEBUG_NONE;
6868 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6869 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6870 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6872 module_param_named(fwlog, fwlog_param, charp, 0)
6873 MODULE_PARM_DESC(fwlog,
6874 "FW logger options: continuous, dbgpins or disable");
6876 module_param(fwlog_mem_blocks, int, 0600);
6877 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6879 module_param(bug_on_recovery, int, 0600);
6880 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6882 module_param(no_recovery, int, 0600);
6883 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6885 MODULE_LICENSE("GPL");
6886 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6887 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");