Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c  164
1 file changed, 92 insertions(+), 72 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 76629b98c78d..4381155375e1 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -18,16 +18,20 @@
#include "mt76.h"
#include "dma.h"
-#define DMA_DUMMY_TXWI ((void *) ~0)
-
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
@@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(0, &q->regs->cpu_idx);
- iowrite32(0, &q->regs->dma_idx);
- iowrite32(q->ndesc, &q->regs->ring_size);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ writel(q->ndesc, &q->regs->ring_size);
return 0;
}
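With the ring geometry (index, descriptor count, buffer size, register base) now passed into mt76_dma_alloc_queue() and the ring registers programmed at allocation time, per-queue bring-up collapses into a single call. A minimal sketch of a caller, assuming the function is reachable through the mt76 queue_ops->alloc hook; the descriptor count and buffer size are illustrative placeholders, not values from this diff:

    /* Hypothetical ring bring-up with the new signature; 256 descriptors
     * and a 2048-byte buffer size are example values only.
     */
    static int example_alloc_ring(struct mt76_dev *dev, struct mt76_queue *q,
                                  int idx, u32 ring_base)
    {
            return dev->queue_ops->alloc(dev, q, idx, 256, 2048, ring_base);
    }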
@@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
int i, idx = -1;
if (txwi)
- q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
DMA_TO_DEVICE);
}
- if (e->txwi == DMA_DUMMY_TXWI)
+ if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
+ if (e->skb == DMA_DUMMY_DATA)
+ e->skb = NULL;
+
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
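DMA_DUMMY_DATA generalizes the removed DMA_DUMMY_TXWI sentinel so that both the txwi and skb fields of a queue entry can carry a "slot consumed, nothing to free" marker, and cleanup now strips both before handing the entry on. A minimal sketch of the idiom, assuming DMA_DUMMY_DATA keeps the all-ones definition of the macro it replaces:

    /* Sentinel idiom: an all-ones pointer can never be a valid allocation,
     * so it safely flags entry fields that must not be freed or followed.
     * Definition assumed to match the removed DMA_DUMMY_TXWI.
     */
    #define DMA_DUMMY_DATA ((void *)~0)

    e->txwi = DMA_DUMMY_DATA;           /* enqueue: mark placeholder */

    /* ... later, on cleanup: */
    if (e->txwi == DMA_DUMMY_DATA)      /* strip sentinel before use */
            e->txwi = NULL;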
@@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(q->ndesc, &q->regs->ring_size);
- q->head = ioread32(&q->regs->dma_idx);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
+ unsigned int n_swq_queued[4] = {};
+ unsigned int n_queued = 0;
bool wake = false;
- int last;
+ int i, last;
- if (!q->ndesc)
+ if (!q)
return;
- spin_lock_bh(&q->lock);
if (flush)
last = -1;
else
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
- while (q->queued && q->tail != last) {
+ while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
- q->swq_queued--;
+ n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ n_queued++;
- if (entry.skb) {
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, flush);
- spin_lock_bh(&q->lock);
- }
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- mt76_put_txwi(dev, entry.txwi);
+ if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
}
- if (!flush)
- mt76_txq_schedule(dev, q);
- else
+ spin_lock_bh(&q->lock);
+
+ q->queued -= n_queued;
+ for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ if (!n_swq_queued[i])
+ continue;
+
+ dev->q_tx[i].swq_queued -= n_swq_queued[i];
+ }
+
+ if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
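The rewritten mt76_dma_tx_cleanup() no longer holds q->lock across the reclaim loop and the tx_complete_skb() callbacks; it accumulates completions in n_queued and the per-source-queue n_swq_queued[] counters, then takes the lock once to publish both. A minimal sketch of that pattern; ring_has_completed() and reclaim_entry() are illustrative placeholders, not mt76 API:

    /* Sketch: reclaim and run completion callbacks lock-free, then apply
     * the aggregated counters in one short critical section.
     */
    unsigned int n_queued = 0;

    while (ring_has_completed(q)) {
            struct mt76_queue_entry e;

            reclaim_entry(q, &e);       /* completion runs without q->lock */
            n_queued++;
    }

    spin_lock_bh(&q->lock);
    q->queued -= n_queued;              /* single update under the lock */
    spin_unlock_bh(&q->lock);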
@@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
@@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
return 0;
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
{
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
struct sk_buff *iter;
dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
+ u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
+ txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
+ if (dev->drv->tx_aligned4_skbs)
+ mt76_insert_hdr_pad(skb);
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
- }
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
+ tx_info.buf[n].addr = t->dma_addr;
+ tx_info.buf[n++].len = dev->drv->txwi_size;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
+ if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
- buf[n].addr = addr;
- buf[n++].len = iter->len;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = iter->len;
}
+ tx_info.nbuf = n;
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto unmap;
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+ ret = -ENOMEM;
goto unmap;
+ }
- return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ tx_info.info, tx_info.skb, t);
unmap:
- ret = -ENOMEM;
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ tx_info.buf[n].len, DMA_TO_DEVICE);
free:
- e.skb = skb;
+ e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
+ dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
-EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
@@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}
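One detail worth calling out in the reworked mt76_dma_tx_queue_skb(): mt76_dma_add_buf() packs two buffers (buf0/buf1) into each hardware descriptor, which is why the fullness check divides the buffer count by two, rounding up. A worked sketch of the arithmetic, with an assumed 256-entry ring:

    /* txwi + linear head + 2 frags => tx_info.nbuf = 4, occupying
     * (4 + 1) / 2 = 2 descriptors. With q->ndesc = 256 the check below
     * rejects the frame once q->queued reaches 253.
     */
    if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1)
            return -ENOMEM;             /* the real code jumps to its unmap label */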