last = readl(&q->regs->dma_idx);
while (q->queued > 0 && q->tail != last) {
- int swq_qid = -1;
-
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
- if (entry.schedule)
- swq_qid = entry.qid;
-
- q->tail = (q->tail + 1) % q->ndesc;
-
- if (entry.skb)
- dev->drv->tx_complete_skb(dev, qid, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
- spin_lock_bh(&q->lock);
- if (swq_qid >= 4)
- dev->q_tx[__MT_TXQ_MAX + swq_qid - 4].swq_queued--;
- else if (swq_qid >= 0)
- dev->q_tx[swq_qid].swq_queued--;
- q->queued--;
- spin_unlock_bh(&q->lock);
}
if (flush) {
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
return nframes;
}
-static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+/* Reap completed tx entries from one SDIO tx queue.
+ *
+ * Walks the ring from q->tail while entries are marked done. MCU frames
+ * are freed directly; everything else is routed through the shared
+ * mt76_queue_tx_complete() helper, which now owns the tail-advance and
+ * queued/swq_queued accounting (hence the removal of the local
+ * n_dequeued/n_sw_dequeued counters and the explicit locking here).
+ * Return value dropped: callers no longer need the dequeue count.
+ */
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
struct mt76_sw_queue *sq = &dev->q_tx[qid];
- u32 n_dequeued = 0, n_sw_dequeued = 0;
struct mt76_queue_entry entry;
struct mt76_queue *q = sq->q;
bool wake;
- while (q->queued > n_dequeued) {
+ while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
- if (q->entry[q->tail].schedule) {
- q->entry[q->tail].schedule = false;
- n_sw_dequeued++;
- }
-
+ /* snapshot the entry (including .schedule, which the helper
+ * consumes for swq_queued accounting) before clearing the slot
+ */
entry = q->entry[q->tail];
q->entry[q->tail].done = false;
- q->tail = (q->tail + 1) % q->ndesc;
- n_dequeued++;
+ q->entry[q->tail].schedule = false;
- if (qid == MT_TXQ_MCU)
+ if (qid == MT_TXQ_MCU) {
dev_kfree_skb(entry.skb);
- else
- dev->drv->tx_complete_skb(dev, qid, &entry);
- }
-
- spin_lock_bh(&q->lock);
+ /* NULL the skb so the helper skips tx_complete_skb() */
+ entry.skb = NULL;
+ }
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
if (qid == MT_TXQ_MCU)
- goto out;
+ return;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
-
-out:
- return n_dequeued;
}
static void mt76s_tx_status_data(struct work_struct *work)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+/* Common tx completion path shared by the DMA, SDIO and USB backends.
+ *
+ * Reports the entry's skb (if any) to the driver's tx_complete_skb()
+ * hook, then, under q->lock, advances the ring tail, decrements the
+ * queued count, and drops the software-queue counter when the entry
+ * was scheduled. e->qid encodes the ext-phy flag in its upper range
+ * (>= 4) — presumably set by the enqueue path; verify against callers.
+ */
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ enum mt76_txq_id qid = e->qid % 4;
+ bool ext_phy = e->qid >= 4;
+
+ /* complete the frame before touching queue accounting; the driver
+ * callback must not depend on q->tail/q->queued being updated yet
+ */
+ if (e->skb)
+ dev->drv->tx_complete_skb(dev, qid, e);
+
+ spin_lock_bh(&q->lock);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ /* ext-phy software queues live past __MT_TXQ_MAX in dev->q_tx[] */
+ if (ext_phy)
+ qid += __MT_TXQ_MAX;
+
+ if (e->schedule)
+ dev->q_tx[qid].swq_queued--;
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 n_dequeued = 0, n_sw_dequeued = 0;
-
sq = &dev->q_tx[i];
q = sq->q;
- while (q->queued > n_dequeued) {
+ while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
- if (q->entry[q->tail].schedule) {
- q->entry[q->tail].schedule = false;
- n_sw_dequeued++;
- }
-
entry = q->entry[q->tail];
q->entry[q->tail].done = false;
- q->tail = (q->tail + 1) % q->ndesc;
- n_dequeued++;
+ q->entry[q->tail].schedule = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
}
- spin_lock_bh(&q->lock);
-
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
-
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
mt76_txq_schedule(&dev->phy, i);
if (dev->drv->tx_status_data &&
if (!q)
continue;
- /* Assure we are in sync with killed tasklet. */
- spin_lock_bh(&q->lock);
- while (q->queued) {
- entry = q->entry[q->tail];
- q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
+ q->entry[q->tail].schedule = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
- }
- spin_unlock_bh(&q->lock);
+ mt76_queue_tx_complete(dev, q, &entry);
}
}