if (entry.schedule)
q->swq_queued--;
- if (entry.skb)
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
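+ /* drop q->lock while the driver completes the skb and reports its tx status */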
+ if (entry.skb) {
+ spin_unlock_bh(&q->lock);
dev->drv->tx_complete_skb(dev, q, &entry, flush);
+ spin_lock_bh(&q->lock);
+ }
if (entry.txwi) {
mt76_put_txwi(dev, entry.txwi);
wake = true;
}
- q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
-
if (!flush && q->tail == last)
last = ioread32(&q->regs->dma_idx);
}
{
struct ieee80211_hw *hw = dev->hw;
- mt76_tx_status_flush(dev, NULL);
+ mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
mt76_tx_free(dev);
}
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key);
+
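+/*
+ * Completed frames are collected on the caller-provided list while
+ * dev->status_list.lock is held and are handed to mac80211 by
+ * mt76_tx_status_unlock().
+ */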
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock);
+
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
- struct mt76_wcid *wcid, int pktid);
-void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb);
+ struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
-
-static inline void
-mt76_tx_status_check(struct mt76_dev *dev)
-{
- spin_lock_bh(&dev->status_list.lock);
- mt76_tx_status_skb_get(dev, NULL, 0);
- spin_unlock_bh(&dev->status_list.lock);
-}
-
-static inline void
-mt76_tx_status_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
-{
- spin_lock_bh(&dev->status_list.lock);
- mt76_tx_status_skb_get(dev, wcid, -1);
- spin_unlock_bh(&dev->status_list.lock);
-}
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ bool flush);
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
struct mt76_wcid *wcid = NULL;
struct mt76x02_sta *msta = NULL;
struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
if (stat->pktid == MT_PACKET_ID_NO_ACK)
return;
rcu_read_lock();
- spin_lock_bh(&mdev->status_list.lock);
+ mt76_tx_status_lock(mdev, &list);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid) {
if (stat->pktid)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
- stat->pktid);
+ stat->pktid, &list);
if (status.skb)
status.info = IEEE80211_SKB_CB(status.skb);
}
}
if (status.skb)
- mt76_tx_status_skb_done(mdev, status.skb);
+ mt76_tx_status_skb_done(mdev, status.skb, &list);
else
ieee80211_tx_status_ext(mt76_hw(dev), &status);
out:
- spin_unlock_bh(&mdev->status_list.lock);
+ mt76_tx_status_unlock(mdev, &list);
rcu_read_unlock();
}
if (!dev->beacon_mask)
mt76x02_check_mac_err(dev);
- mt76_tx_status_check(&dev->mt76);
+ mt76_tx_status_check(&dev->mt76, NULL, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
int i;
mutex_lock(&dev->mt76.mutex);
- mt76_tx_status_flush(&dev->mt76, &msta->wcid);
+ mt76_tx_status_check(&dev->mt76, &msta->wcid, true);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(&dev->mt76, sta->txq[i]);
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
+void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock)
+{
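+ /* frames completed while the lock is held are queued on @list and reported at unlock time */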
+ __skb_queue_head_init(list);
+ spin_lock_bh(&dev->status_list.lock);
+ __acquire(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock)
+{
+ struct sk_buff *skb;
+
+ spin_unlock_bh(&dev->status_list.lock);
+ __release(&dev->status_list.lock);
+
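+ /* report everything collected on @list now that status_list.lock has been dropped */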
+ while ((skb = __skb_dequeue(list)) != NULL)
+ ieee80211_tx_status(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
static void
-__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags)
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+ struct sk_buff_head *list)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
info->flags |= IEEE80211_TX_STAT_ACK;
}
- ieee80211_tx_status(dev->hw, skb);
+ __skb_queue_tail(list, skb);
}
void
-mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb)
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list)
{
- __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
struct sk_buff *
-mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid)
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list)
{
struct sk_buff *skb, *tmp;
continue;
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
- MT_TX_CB_TXS_DONE);
+ MT_TX_CB_TXS_DONE, list);
}
return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+ struct sk_buff_head list;
+
+ mt76_tx_status_lock(dev, &list);
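+ /* a pktid of -1 flushes all matching frames, 0 only reaps timed-out entries */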
+ mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+ mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
+ struct sk_buff_head list;
+
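+ /* frames never added to status_list have no list linkage; just free them */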
if (!skb->prev) {
ieee80211_free_txskb(dev->hw, skb);
return;
}
- spin_lock_bh(&dev->status_list.lock);
- __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE);
- spin_unlock_bh(&dev->status_list.lock);
+ mt76_tx_status_lock(dev, &list);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+ mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
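
For reference, a minimal usage sketch of the list-based helpers (not part of the patch; the handler name and pktid argument are hypothetical, mirroring the mt76x02 status path above): completed frames are collected on a local list under the status lock, and mt76_tx_status_unlock() hands them to mac80211 once the lock is dropped.

static void
example_report_tx_status(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 pktid)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	mt76_tx_status_lock(dev, &list);

	/* look up the tracked frame; stale entries are failed onto @list */
	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
	if (skb) {
		/* fill IEEE80211_SKB_CB(skb)->status.rates here, then queue it */
		mt76_tx_status_skb_done(dev, skb, &list);
	}

	/* drops status_list.lock and reports every frame queued on @list */
	mt76_tx_status_unlock(dev, &list);
}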