Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/tx.c')
 drivers/net/wireless/mediatek/mt76/tx.c | 148 +++++++++++++++---------------
 1 file changed, 74 insertions(+), 74 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 2585df512335..5397827668b9 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
dma_addr_t addr;
+ u8 *txwi;
int size;
- size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
- t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
- if (!t)
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
return NULL;
- addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
return t;
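
The hunk above changes mt76_alloc_txwi so the hardware descriptor (txwi) and its bookkeeping entry share one cache-line-aligned allocation: the first txwi_size bytes are the DMA-mapped descriptor, and the struct mt76_txwi_cache sits immediately behind it. A minimal sketch of how the descriptor pointer can be recovered from a cache entry under that layout (the helper name is illustrative, not taken from this diff):

/*
 * Layout of one allocation:
 *   [ dev->drv->txwi_size bytes of hw descriptor | struct mt76_txwi_cache ]
 * so the descriptor starts txwi_size bytes before the cache entry.
 */
static inline u8 *mt76_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}
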
@@ -72,13 +74,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
list_add(&t->list, &dev->txwi_cache);
spin_unlock_bh(&dev->lock);
}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
void mt76_tx_free(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
}
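
mt76_tx_free now unmaps dev->drv->txwi_size bytes rather than a fixed sizeof(t->txwi), so the descriptor size is declared once per sub-driver. A hedged sketch of what that declaration could look like in a driver's mt76_driver_ops (the descriptor struct name here is only an example):

static const struct mt76_driver_ops drv_ops = {
	/* size of the hw tx descriptor this chip expects; example type */
	.txwi_size = sizeof(struct mt76x02_txwi),
	/* ... tx/rx callbacks ... */
};
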
@@ -266,7 +269,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
skb_set_queue_mapping(skb, qid);
}
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
@@ -283,10 +286,10 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
- q = &dev->q_tx[qid];
+ q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
- dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
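
The check at the end of this hunk is the usual backpressure pattern: once fewer than 8 descriptors remain in the ring, the corresponding mac80211 queue is stopped until completions reclaim entries. A minimal sketch of both halves of that pattern (the wake side is assumed to live in the tx-cleanup path, which this diff does not show):

/* enqueue side: stop the queue when the hw ring is nearly full */
if (q->queued > q->ndesc - 8 && !q->stopped) {
	ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	q->stopped = true;
}

/* assumed completion side: wake it once enough entries were reclaimed */
if (q->queued < q->ndesc - 8 && q->stopped) {
	ieee80211_wake_queue(dev->hw, qid);
	q->stopped = false;
}
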
@@ -327,7 +330,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
{
struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
if (last)
@@ -335,7 +337,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
- dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}
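
mt76_skb_set_moredata, used above, flips the More Data bit in the 802.11 frame control field so a power-saving station knows whether more buffered frames follow. A sketch consistent with that use:

static void mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (enable)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	else
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
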
void
@@ -346,7 +348,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct mt76_dev *dev = hw->priv;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
spin_lock_bh(&hwq->lock);
@@ -386,12 +388,14 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq, bool *empty)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
- struct ieee80211_tx_info *info;
+ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1, limit;
struct ieee80211_tx_rate tx_rate;
@@ -411,7 +415,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
info = IEEE80211_SKB_CB(skb);
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
tx_rate = info->control.rates[0];
@@ -423,7 +427,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -458,7 +462,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
txq->sta);
if (idx < 0)
return idx;
@@ -467,8 +471,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
} while (n_frames < limit);
if (!probe) {
- hwq->swq_queued++;
+ hwq->entry[idx].qid = sq - dev->q_tx;
hwq->entry[idx].schedule = true;
+ sq->swq_queued++;
}
dev->queue_ops->kick(dev, hwq);
@@ -477,22 +482,37 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
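
The bookkeeping change above moves the swq_queued counter from the hardware queue to the software queue and records which software queue a burst belongs to in hwq->entry[idx].qid, so the completion path can credit the right counter. A hedged sketch of the matching decrement (the cleanup code itself is outside this diff):

/* assumed tx-cleanup side: entry is the reclaimed hw ring slot */
if (entry->schedule) {
	entry->schedule = false;
	dev->q_tx[entry->qid].swq_queued--;
}
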
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_txq *mtxq, *mtxq_last;
- int len = 0;
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ struct mt76_wcid *wcid;
+ int ret = 0;
-restart:
- mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
- while (!list_empty(&hwq->swq)) {
+ spin_lock_bh(&hwq->lock);
+ while (1) {
bool empty = false;
- int cur;
+
+ if (sq->swq_queued >= 4)
+ break;
if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
- test_bit(MT76_RESET, &dev->state))
- return -EBUSY;
+ test_bit(MT76_RESET, &dev->state)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ txq = ieee80211_next_txq(dev->hw, qid);
+ if (!txq)
+ break;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ wcid = mtxq->wcid;
+ if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ continue;
- mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -504,38 +524,37 @@ restart:
spin_unlock_bh(&hwq->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
spin_lock_bh(&hwq->lock);
- goto restart;
}
- list_del_init(&mtxq->list);
-
- cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
- if (!empty)
- list_add_tail(&mtxq->list, &hwq->swq);
-
- if (cur < 0)
- return cur;
-
- len += cur;
-
- if (mtxq == mtxq_last)
- break;
+ ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
+ if (skb_queue_empty(&mtxq->retry_q))
+ empty = true;
+ ieee80211_return_txq(dev->hw, txq, !empty);
}
+ spin_unlock_bh(&hwq->lock);
- return len;
+ return ret;
}
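
This is the core of the rework: instead of round-robining over a driver-private swq list, mt76_txq_schedule_list now pulls TXQs from mac80211's airtime-fairness scheduler. The API usage pattern it adopts, reduced to its skeleton (simplified; the PS-station skip, send_bar handling, and locking above are omitted):

ieee80211_txq_schedule_start(hw, ac);
while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
	/* dequeue frames from txq and hand them to the hw ring ... */
	ieee80211_return_txq(hw, txq, more_frames); /* requeue if not empty */
}
ieee80211_txq_schedule_end(hw, ac);
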
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
+ if (qid >= 4)
+ return;
+
+ if (sq->swq_queued >= 4)
+ return;
+
rcu_read_lock();
- do {
- if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
- break;
- len = mt76_txq_schedule_list(dev, hwq);
+ do {
+ ieee80211_txq_schedule_start(dev->hw, qid);
+ len = mt76_txq_schedule_list(dev, qid);
+ ieee80211_txq_schedule_end(dev->hw, qid);
} while (len > 0);
+
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
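
The qid >= 4 guard above, and the i <= MT_TXQ_BK loop bound in the next hunk, both rely on the first four mt76_txq_id entries being the QoS data queues. A sketch of the relevant part of that enum from mt76.h (the ordering is what matters; the tail of the enum is elided):

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,	/* 0 */
	MT_TXQ_VI = IEEE80211_AC_VI,	/* 1 */
	MT_TXQ_BE = IEEE80211_AC_BE,	/* 2 */
	MT_TXQ_BK = IEEE80211_AC_BK,	/* 3 */
	MT_TXQ_PSD,
	/* ... remaining special-purpose queues ... */
	__MT_TXQ_MAX
};
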
@@ -544,13 +563,8 @@ void mt76_txq_schedule_all(struct mt76_dev *dev)
{
int i;
- for (i = 0; i <= MT_TXQ_BK; i++) {
- struct mt76_queue *q = &dev->q_tx[i];
-
- spin_lock_bh(&q->lock);
- mt76_txq_schedule(dev, q);
- spin_unlock_bh(&q->lock);
- }
+ for (i = 0; i <= MT_TXQ_BK; i++)
+ mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
@@ -561,18 +575,18 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_queue *hwq;
struct mt76_txq *mtxq;
if (!txq)
continue;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ hwq = mtxq->swq->q;
- spin_lock_bh(&mtxq->hwq->lock);
+ spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&mtxq->hwq->lock);
+ spin_unlock_bh(&hwq->lock);
}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
@@ -580,36 +594,23 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
struct mt76_dev *dev = hw->priv;
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- struct mt76_queue *hwq = mtxq->hwq;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
- spin_lock_bh(&hwq->lock);
- if (list_empty(&mtxq->list))
- list_add_tail(&mtxq->list, &hwq->swq);
- mt76_txq_schedule(dev, hwq);
- spin_unlock_bh(&hwq->lock);
+ tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
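
mt76_wake_tx_queue no longer schedules inline under the hardware queue lock; it just kicks the driver's tx tasklet, which runs the scheduler outside the hot path. The tasklet body is per sub-driver and not part of this diff; a minimal sketch of what it boils down to:

/* hypothetical tasklet body; real drivers also reap completed frames here */
static void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(dev);
}
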
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
- struct mt76_queue *hwq;
struct sk_buff *skb;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
- hwq = mtxq->hwq;
-
- spin_lock_bh(&hwq->lock);
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&hwq->lock);
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
ieee80211_free_txskb(dev->hw, skb);
@@ -620,10 +621,9 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- INIT_LIST_HEAD(&mtxq->list);
skb_queue_head_init(&mtxq->retry_q);
- mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);