author     Quan Zhou <quan.zhou@mediatek.com>   2024-11-08 20:59:40 +0800
committer  Felix Fietkau <nbd@nbd.name>         2025-01-14 13:34:35 +0100
commit     50f64e4253c57fb6de2a30a693ac7cd73711c3d4 (patch)
tree       f0154654068b09d89fb7ec79aa68872d50a40c7d
parent     wifi: mt76: mt7925: Properly handle responses for commands with events (diff)
wifi: mt76: do not hold queue lock during initial rx buffer alloc
In the DMA init and reset paths, every rx ring needs a full buffer allocation. Since this is very time consuming, split the fill function so that the initial allocation is performed without holding the queue spinlock. This avoids causing excessive scheduler latency.

Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
Reviewed-by: Shayne Chen <shayne.chen@mediatek.com>
Reviewed-by: Deren Wu <deren.wu@mediatek.com>
Link: https://patch.msgid.link/57c68a7ce1dd9022fa5e06af2c53d6313f30ec83.1731069062.git.quan.zhou@mediatek.com
Signed-off-by: Felix Fietkau <nbd@nbd.name>
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 5f46d6daeaa7..844af16ee551 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -631,7 +631,8 @@ free_skb:
return ret;
}
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->ndesc)
return 0;
- spin_lock_bh(&q->lock);
-
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf = {};
enum dma_data_direction dir;
@@ -674,6 +673,19 @@ done:
if (frames || mt76_queue_is_wed_rx(q))
mt76_dma_kick_queue(dev, q);
+ return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
+{
+ int frames;
+
+ if (!q->ndesc)
+ return 0;
+
+ spin_lock_bh(&q->lock);
+ frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
return frames;
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
return;
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ mt76_dma_rx_fill_buf(dev, q, false);
}
static void
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
mt76_for_each_q_rx(dev, i) {
netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
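
For readers skimming the change, the structure that results from this patch is summarized below in a condensed sketch distilled from the diff: the unlocked helper mt76_dma_rx_fill_buf() carries the ring-fill loop, and the exported mt76_dma_rx_fill() becomes a thin wrapper that takes q->lock around it. The loop body and the done: label from the original function are elided; this is a sketch of the shape of the code, not a verbatim copy of dma.c.

/* Condensed sketch of the post-patch structure (loop body elided). */
static int
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames = 0;

	if (!q->ndesc)
		return 0;

	/* Fill the ring without taking q->lock, so a full initial
	 * allocation for every rx ring does not sit inside a locked
	 * section and cause scheduler latency.
	 */
	while (q->queued < q->ndesc - 1) {
		/* ... allocate a buffer, map it, add it to the ring ... */
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	return frames;
}

int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames;

	if (!q->ndesc)
		return 0;

	/* Runtime refill path: still serialized against concurrent
	 * users of the queue via q->lock.
	 */
	spin_lock_bh(&q->lock);
	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	spin_unlock_bh(&q->lock);

	return frames;
}

As the last two hunks show, mt76_dma_rx_reset() and mt76_dma_init() now call mt76_dma_rx_fill_buf() directly; per the commit message, those init/reset paths are where the long initial allocation happens, so keeping them outside the spinlock is the point of the split, while the normal refill path keeps its locking unchanged.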