/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */
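
/*
 * Each channel is driven by a circular list of struct wcn36xx_dxe_ctl
 * control blocks, one per hardware descriptor (struct wcn36xx_dxe_desc);
 * the lists are built in wcn36xx_dxe_allocate_ctl_block() and wired to the
 * DMA descriptors in wcn36xx_dxe_init_descs() below.
 */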
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"
static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			/* Last block closes the ring back to the head */
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}

		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if (i < wcn_ch->desc_num - 1) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else {
			/* Chain the last descriptor back to the first */
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}

		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}
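
/*
 * Note: the phy_next_l chain built above mirrors the ctl-block ring, so the
 * hardware walks the same circular descriptor list that the driver walks in
 * software via head_blk_ctl/tail_blk_ctl.
 */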
static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the other will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}
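
/*
 * TX rings are therefore consumed in even/odd pairs: the even control block
 * carries a wcn36xx_tx_bd header from the pool above, the odd one carries the
 * skb payload. wcn36xx_dxe_tx_frame() below relies on this layout when it
 * picks ctl_bd = head_blk_ctl and ctl_skb = ctl_bd->next.
 */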
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}
static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (!skb)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}
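
/*
 * tx_ack_skb is the single in-flight frame for which mac80211 requested an
 * explicit TX status; it is parked until the firmware's ack indication
 * arrives here, which is why an indication with no parked skb is "spurious".
 */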
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one loop of do-while because in case ring is
	 * completely full head and tail are pointing to the same element
	 * and while-do will not make any cycles.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No explicit TX status was requested, so the
				 * frame can be released now; otherwise it is
				 * kept until the TX status indication comes.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}
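
/*
 * The VLD bit acts as the ownership flag: the driver sets it when handing a
 * descriptor to the hardware and the DXE clears it on completion, so reaping
 * stops at the first descriptor that is still marked valid.
 */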
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
				    int_reason);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
				    int_reason);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}
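
/*
 * Note that only the RX interrupt is marked as a wakeup source
 * (enable_irq_wake above), presumably so that an incoming frame can wake a
 * suspended host; TX completions only matter while the host is awake.
 */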
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int ret = 0, int_reason;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (!ret) {
			/* New skb allocation ok. Use the new one and queue
			 * the old one to the network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep the old skb unsubmitted and reuse it for RX DMA */

		dxe->ctrl = ctrl;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}
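
/*
 * Replenish-before-deliver: a buffer is only handed up the stack if a
 * replacement skb could be allocated (GFP_ATOMIC), so under memory pressure
 * the ring stays primed and the received frame is dropped instead.
 */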
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}
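
/*
 * DXE channel numbering as used here: CH1 is the low-priority RX ring and
 * CH3 the high-priority one (see the CH1/CH3 masks above), while the TX
 * rings are selected via the CH0/CH4 masks programmed in wcn36xx_dxe_init().
 */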
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	void *cpu_addr;
	size_t s;

	/* Allocate BD headers for MGMT frames */

	/* Magic chunk size inherited from the vendor driver (where it comes
	 * from, ask QC); the net effect is to round the BD size down to an
	 * 8-byte boundary and add 16 bytes of headroom.
	 */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;

	/* Allocate BD headers for DATA frames */

	/* Same chunk-size rounding as for the MGMT pool above */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s,
				      &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If skb is not NULL, we reached the tail of the ring and the ring is
	 * full. Stop the queues to let mac80211 back off until the ring has
	 * an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When connected and trying to send a data frame, the chip can be in
	 * sleep mode and a register write will not wake it up. Instead,
	 * notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}
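
/*
 * The two descriptor writes above are deliberately ordered: the payload
 * descriptor (desc_skb) is marked valid before the BD descriptor (desc_bd),
 * with barriers in between, so the hardware never sees a valid BD whose
 * companion payload descriptor is still incomplete.
 */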
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
				   wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_L,
				   WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
				   wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_H,
				   WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_L,
				   WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_L,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_H,
				   WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_H,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}
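
/*
 * The control blocks themselves are not released here: they are allocated by
 * wcn36xx_dxe_alloc_ctl_blks() and freed by the matching
 * wcn36xx_dxe_free_ctl_blks(), which the caller invokes separately.
 */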