Diffstat (limited to 'drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c')
 drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 408 ++++++++++++----------
 1 file changed, 252 insertions(+), 156 deletions(-)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 7fc4f0d6a9ad..502e72b7cdcc 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -25,6 +25,8 @@
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+#include <linux/log2.h>
#include "qtn_hw_ids.h"
#include "pcie_bus_priv.h"
@@ -36,17 +38,13 @@ static bool use_msi = true;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
-static unsigned int tx_bd_size_param = 256;
+static unsigned int tx_bd_size_param = 32;
module_param(tx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
static unsigned int rx_bd_size_param = 256;
module_param(rx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
-
-static unsigned int rx_bd_reserved_param = 16;
-module_param(rx_bd_reserved_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_reserved_param, "Reserved RX descriptors");
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
static u8 flashboot = 1;
module_param(flashboot, byte, 0644);
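Why the queue sizes must now be powers of two: the rework below replaces every "% priv->tx_bd_num" with a "& (priv->tx_bd_num - 1)" mask and builds on the CIRC_CNT()/CIRC_SPACE() macros, which are only correct for power-of-two ring sizes. A minimal userspace sketch of the masking identity, with illustrative names that are not from the driver:

#include <assert.h>
#include <stdio.h>

/* Hypothetical ring size; must be a power of two for the mask trick. */
#define RING_SIZE 32u

int main(void)
{
	unsigned int hw_count = 77; /* free-running hardware counter */

	/* power-of-two check, equivalent to the kernel's is_power_of_2() */
	assert(RING_SIZE && !(RING_SIZE & (RING_SIZE - 1)));

	/* With a power-of-two size, x % RING_SIZE == x & (RING_SIZE - 1),
	 * so the expensive modulo can become a single AND. */
	assert(hw_count % RING_SIZE == (hw_count & (RING_SIZE - 1)));
	printf("index %u\n", hw_count & (RING_SIZE - 1));
	return 0;
}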
@@ -186,8 +184,10 @@ static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
return IOMEM_ERR_PTR(ret);
busaddr = pci_resource_start(priv->pdev, index);
- vaddr = pcim_iomap_table(priv->pdev)[index];
len = pci_resource_len(priv->pdev, index);
+ vaddr = pcim_iomap_table(priv->pdev)[index];
+ if (!vaddr)
+ return IOMEM_ERR_PTR(-ENOMEM);
pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
index, vaddr, &busaddr, (int)len);
@@ -250,19 +250,19 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
int ret = -ENOMEM;
priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
- if (IS_ERR_OR_NULL(priv->sysctl_bar)) {
+ if (IS_ERR(priv->sysctl_bar)) {
pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
return ret;
}
priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
- if (IS_ERR_OR_NULL(priv->dmareg_bar)) {
+ if (IS_ERR(priv->dmareg_bar)) {
pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
return ret;
}
priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
- if (IS_ERR_OR_NULL(priv->epmem_bar)) {
+ if (IS_ERR(priv->epmem_bar)) {
pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
return ret;
}
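The switch from IS_ERR_OR_NULL() to plain IS_ERR() is safe because qtnf_map_bar() now converts a NULL iomap entry into IOMEM_ERR_PTR(-ENOMEM) before returning, so callers see an error pointer in every failure case. A simplified userspace model of the error-pointer idea behind the kernel's <linux/err.h> — a sketch of the encoding, not the real header:

#include <stdio.h>

/* Small negative errno values are folded into the very top of the
 * pointer space, which no valid pointer occupies, so one return
 * value can carry either a pointer or an error code. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12); /* -ENOMEM */

	if (IS_ERR(p))
		printf("mapping failed: %ld\n", PTR_ERR(p));
	return 0;
}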
@@ -274,32 +274,6 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
return 0;
}
-static int
-qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask)
-{
- int ret;
-
- ret = dma_supported(&priv->pdev->dev, dma_mask);
- if (!ret) {
- pr_err("DMA mask %llu not supported\n", dma_mask);
- return ret;
- }
-
- ret = pci_set_dma_mask(priv->pdev, dma_mask);
- if (ret) {
- pr_err("failed to set DMA mask %llu\n", dma_mask);
- return ret;
- }
-
- ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
- if (ret) {
- pr_err("failed to set consistent DMA mask %llu\n", dma_mask);
- return ret;
- }
-
- return ret;
-}
-
static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -418,9 +392,8 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
- priv->tx_bd_reclaim_start = 0;
- priv->tx_bd_index = 0;
- priv->tx_queue_len = 0;
+ priv->tx_bd_r_index = 0;
+ priv->tx_bd_w_index = 0;
/* rx bd */
@@ -430,43 +403,34 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
priv->rx_bd_vbase = vaddr;
priv->rx_bd_pbase = paddr;
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
writel(QTN_HOST_HI32(paddr),
PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));
- priv->hw_txproc_wr_ptr = priv->rx_bd_num - rx_bd_reserved_param;
-
- writel(priv->hw_txproc_wr_ptr,
- PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
-
pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
- priv->rx_bd_index = 0;
-
return 0;
}
-static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index)
+static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index)
{
struct qtnf_rx_bd *rxbd;
struct sk_buff *skb;
dma_addr_t paddr;
- skb = __dev_alloc_skb(SKB_BUF_SIZE + NET_IP_ALIGN,
- GFP_ATOMIC);
+ skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
if (!skb) {
- priv->rx_skb[rx_bd_index] = NULL;
+ priv->rx_skb[index] = NULL;
return -ENOMEM;
}
- priv->rx_skb[rx_bd_index] = skb;
-
- skb_reserve(skb, NET_IP_ALIGN);
-
- rxbd = &priv->rx_bd_vbase[rx_bd_index];
+ priv->rx_skb[index] = skb;
+ rxbd = &priv->rx_bd_vbase[index];
paddr = pci_map_single(priv->pdev, skb->data,
SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -475,17 +439,24 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index)
return -ENOMEM;
}
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
-
/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
-
rxbd->info = 0x0;
+ priv->rx_bd_w_index = index;
+
+ /* sync up all descriptor updates */
+ wmb();
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
+
+ writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
return 0;
}
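The barrier placement above is the classic producer/doorbell pattern: every descriptor field (addr, addr_h, info) must be globally visible before the write-pointer update that hands the descriptor to hardware, hence the wmb() between the rxbd stores and the PCIE_HDP_TX_HOST_Q_WR_PTR write. A runnable userspace analog, using GCC's full fence as a stand-in for the (weaker, store-only) wmb(); the names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Toy descriptor, standing in for struct qtnf_rx_bd. */
struct desc {
	uint32_t addr;
	uint32_t addr_h;
	uint32_t info;
};

static struct desc ring[32];
static volatile uint32_t wr_ptr; /* the "doorbell" the consumer polls */

static void publish(uint16_t idx, uint32_t lo, uint32_t hi)
{
	ring[idx].addr = lo;
	ring[idx].addr_h = hi;
	ring[idx].info = 0;

	/* Full fence as a stand-in for the kernel's wmb(): descriptor
	 * contents must be visible before the index is published. */
	__sync_synchronize();

	wr_ptr = idx;
}

int main(void)
{
	publish(3, 0xdeadbeef, 0);
	printf("wr_ptr=%u\n", wr_ptr);
	return 0;
}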
@@ -516,7 +487,7 @@ static void free_xfer_buffers(void *data)
/* free rx buffers */
for (i = 0; i < priv->rx_bd_num; i++) {
- if (priv->rx_skb[i]) {
+ if (priv->rx_skb && priv->rx_skb[i]) {
rxbd = &priv->rx_bd_vbase[i];
paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
le32_to_cpu(rxbd->addr));
@@ -529,19 +500,72 @@ static void free_xfer_buffers(void *data)
/* free tx buffers */
for (i = 0; i < priv->tx_bd_num; i++) {
- if (priv->tx_skb[i]) {
+ if (priv->tx_skb && priv->tx_skb[i]) {
dev_kfree_skb_any(priv->tx_skb[i]);
priv->tx_skb[i] = NULL;
}
}
}
+static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv)
+{
+ u32 val;
+
+ val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
+ usleep_range(50, 100);
+ val &= ~HHBM_CONFIG_SOFT_RESET;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ val |= HHBM_64BIT;
+#endif
+ writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
+ writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base));
+
+ return 0;
+}
+
static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
{
int ret;
+ u32 val;
priv->tx_bd_num = tx_bd_size_param;
priv->rx_bd_num = rx_bd_size_param;
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
+ if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
+ pr_err("tx_bd_size_param %u is not power of two\n",
+ priv->tx_bd_num);
+ return -EINVAL;
+ }
+
+ val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_err("tx_bd_size_param %u is too large\n",
+ priv->tx_bd_num);
+ return -EINVAL;
+ }
+
+ if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
+ pr_err("rx_bd_size_param %u is not power of two\n",
+ priv->rx_bd_num);
+ return -EINVAL;
+ }
+
+ val = priv->rx_bd_num * sizeof(dma_addr_t);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_err("rx_bd_size_param %u is too large\n",
+ priv->rx_bd_num);
+ return -EINVAL;
+ }
+
+ ret = qtnf_hhbm_init(priv);
+ if (ret) {
+ pr_err("failed to init h/w queues\n");
+ return ret;
+ }
ret = alloc_skb_array(priv);
if (ret) {
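qtnf_pcie_init_xfer() now rejects bad module parameters up front: each ring size must be non-zero, a power of two, and small enough that the resulting descriptor table fits the HHBM hardware limit. A condensed sketch of that validation order; HHBM_MAX_SIZE and the descriptor size below are placeholders, not the driver's real values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HHBM_MAX_SIZE 8192u /* placeholder for PCIE_HHBM_MAX_SIZE */

static bool ring_param_ok(uint32_t num, size_t desc_size)
{
	if (!num || (num & (num - 1)))
		return false; /* zero or not a power of two */
	if (num * desc_size > HHBM_MAX_SIZE)
		return false; /* descriptor table exceeds the h/w limit */
	return true;
}

int main(void)
{
	printf("%d\n", ring_param_ok(32, 16));   /* 1: valid */
	printf("%d\n", ring_param_ok(33, 16));   /* 0: not a power of two */
	printf("%d\n", ring_param_ok(1024, 16)); /* 0: table too large */
	return 0;
}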
@@ -564,67 +588,72 @@ static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
return ret;
}
-static int qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
+static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
{
struct qtnf_tx_bd *txbd;
struct sk_buff *skb;
+ unsigned long flags;
dma_addr_t paddr;
- int last_sent;
- int count;
+ u32 tx_done_index;
+ int count = 0;
int i;
- last_sent = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
- % priv->tx_bd_num;
- i = priv->tx_bd_reclaim_start;
- count = 0;
+ spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
- while (i != last_sent) {
- skb = priv->tx_skb[i];
- if (!skb)
- break;
+ tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
+ & (priv->tx_bd_num - 1);
- txbd = &priv->tx_bd_vbase[i];
- paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
- le32_to_cpu(txbd->addr));
- pci_unmap_single(priv->pdev, paddr, skb->len, PCI_DMA_TODEVICE);
+ i = priv->tx_bd_r_index;
- if (skb->dev) {
- skb->dev->stats.tx_packets++;
- skb->dev->stats.tx_bytes += skb->len;
+ while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
+ skb = priv->tx_skb[i];
+ if (likely(skb)) {
+ txbd = &priv->tx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+ le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (skb->dev) {
+ skb->dev->stats.tx_packets++;
+ skb->dev->stats.tx_bytes += skb->len;
+
+ if (netif_queue_stopped(skb->dev))
+ netif_wake_queue(skb->dev);
+ }
- if (netif_queue_stopped(skb->dev))
- netif_wake_queue(skb->dev);
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
priv->tx_skb[i] = NULL;
- priv->tx_queue_len--;
count++;
if (++i >= priv->tx_bd_num)
i = 0;
}
- priv->tx_bd_reclaim_start = i;
priv->tx_reclaim_done += count;
priv->tx_reclaim_req++;
+ priv->tx_bd_r_index = i;
- return count;
+ spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}
-static bool qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
+static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
{
- if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
pr_err_ratelimited("reclaim full Tx queue\n");
qtnf_pcie_data_tx_reclaim(priv);
- if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
priv->tx_full_count++;
- return false;
+ return 0;
}
}
- return true;
+ return 1;
}
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
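The reclaim rework above drops the tx_queue_len counter in favor of the standard two-index ring from <linux/circ_buf.h>: CIRC_CNT() bounds how many completed descriptors the reclaimer may free, CIRC_SPACE() how many slots the transmit path may still fill, and one slot is deliberately left empty so a full ring is distinguishable from an empty one. The two macros, copied verbatim from the kernel header into a runnable example:

#include <stdio.h>

/* Exactly as in <linux/circ_buf.h>; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int size = 32, w = 5, r = 2;

	/* 3 descriptors pending, 28 free: one slot always stays unused
	 * so that full and empty rings are distinguishable. */
	printf("cnt=%u space=%u\n",
	       CIRC_CNT(w, r, size), CIRC_SPACE(w, r, size));
	return 0;
}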
@@ -632,24 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
dma_addr_t txbd_paddr, skb_paddr;
struct qtnf_tx_bd *txbd;
- unsigned long flags;
int len, i;
u32 info;
int ret = 0;
- spin_lock_irqsave(&priv->tx_lock, flags);
-
- priv->tx_done_count++;
-
if (!qtnf_tx_queue_ready(priv)) {
if (skb->dev)
netif_stop_queue(skb->dev);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- i = priv->tx_bd_index;
+ i = priv->tx_bd_w_index;
priv->tx_skb[i] = skb;
len = skb->len;
@@ -673,16 +696,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
/* write new TX descriptor to PCIE_RX_FIFO on EP */
txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
- writel(QTN_HOST_LO32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
writel(QTN_HOST_HI32(txbd_paddr),
PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(txbd_paddr),
+ PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
if (++i >= priv->tx_bd_num)
i = 0;
- priv->tx_bd_index = i;
- priv->tx_queue_len++;
+ priv->tx_bd_w_index = i;
tx_done:
if (ret && skb) {
@@ -692,7 +717,8 @@ tx_done:
dev_kfree_skb_any(skb);
}
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ qtnf_pcie_data_tx_reclaim(priv);
+ priv->tx_done_count++;
return NETDEV_TX_OK;
}
@@ -719,14 +745,21 @@ static irqreturn_t qtnf_interrupt(int irq, void *data)
if (!(status & priv->pcie_irq_mask))
goto irq_done;
- if (status & PCIE_HDP_INT_RX_BITS) {
+ if (status & PCIE_HDP_INT_RX_BITS)
priv->pcie_irq_rx_count++;
+
+ if (status & PCIE_HDP_INT_TX_BITS)
+ priv->pcie_irq_tx_count++;
+
+ if (status & PCIE_HDP_INT_HHBM_UF)
+ priv->pcie_irq_uf_count++;
+
+ if (status & PCIE_HDP_INT_RX_BITS) {
qtnf_dis_rxdone_irq(priv);
napi_schedule(&bus->mux_napi);
}
if (status & PCIE_HDP_INT_TX_BITS) {
- priv->pcie_irq_tx_count++;
qtnf_dis_txdone_irq(priv);
tasklet_hi_schedule(&priv->reclaim_tq);
}
@@ -741,16 +774,19 @@ irq_done:
return IRQ_HANDLED;
}
-static inline void hw_txproc_wr_ptr_inc(struct qtnf_pcie_bus_priv *priv)
+static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv)
{
- u32 index;
+ u16 index = priv->rx_bd_r_index;
+ struct qtnf_rx_bd *rxbd;
+ u32 descw;
- index = priv->hw_txproc_wr_ptr;
+ rxbd = &priv->rx_bd_vbase[index];
+ descw = le32_to_cpu(rxbd->info);
- if (++index >= priv->rx_bd_num)
- index = 0;
+ if (descw & QTN_TXDONE_MASK)
+ return 1;
- priv->hw_txproc_wr_ptr = index;
+ return 0;
}
static int qtnf_rx_poll(struct napi_struct *napi, int budget)
@@ -762,64 +798,96 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget)
int processed = 0;
struct qtnf_rx_bd *rxbd;
dma_addr_t skb_paddr;
+ int consume;
u32 descw;
- u16 index;
+ u32 psize;
+ u16 r_idx;
+ u16 w_idx;
int ret;
- index = priv->rx_bd_index;
- rxbd = &priv->rx_bd_vbase[index];
+ while (processed < budget) {
- descw = le32_to_cpu(rxbd->info);
- while ((descw & QTN_TXDONE_MASK) && (processed < budget)) {
- skb = priv->rx_skb[index];
+ if (!qtnf_rx_data_ready(priv))
+ goto rx_out;
- if (likely(skb)) {
- skb_put(skb, QTN_GET_LEN(descw));
+ r_idx = priv->rx_bd_r_index;
+ rxbd = &priv->rx_bd_vbase[r_idx];
+ descw = le32_to_cpu(rxbd->info);
+
+ skb = priv->rx_skb[r_idx];
+ psize = QTN_GET_LEN(descw);
+ consume = 1;
+
+ if (!(descw & QTN_TXDONE_MASK)) {
+ pr_warn("skip invalid rxbd[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (!skb) {
+ pr_warn("skip missing rx_skb[%d]\n", r_idx);
+ consume = 0;
+ }
+ if (skb && (skb_tailroom(skb) < psize)) {
+ pr_err("skip packet with invalid length: %u > %u\n",
+ psize, skb_tailroom(skb));
+ consume = 0;
+ }
+
+ if (skb) {
skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
le32_to_cpu(rxbd->addr));
pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
PCI_DMA_FROMDEVICE);
+ }
+ if (consume) {
+ skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
- netif_receive_skb(skb);
+ napi_gro_receive(napi, skb);
} else {
pr_debug("drop untagged skb\n");
bus->mux_dev.stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
-
- processed++;
} else {
- pr_err("missing rx_skb[%d]\n", index);
+ if (skb) {
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
}
- /* attached rx buffer is passed upstream: map a new one */
- ret = skb2rbd_attach(priv, index);
- if (likely(!ret)) {
- if (++index >= priv->rx_bd_num)
- index = 0;
+ priv->rx_skb[r_idx] = NULL;
+ if (++r_idx >= priv->rx_bd_num)
+ r_idx = 0;
- priv->rx_bd_index = index;
- hw_txproc_wr_ptr_inc(priv);
+ priv->rx_bd_r_index = r_idx;
- rxbd = &priv->rx_bd_vbase[index];
- descw = le32_to_cpu(rxbd->info);
- } else {
- pr_err("failed to allocate new rx_skb[%d]\n", index);
- break;
+ /* replace processed buffer with a new one */
+ w_idx = priv->rx_bd_w_index;
+ while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num) > 0) {
+ if (++w_idx >= priv->rx_bd_num)
+ w_idx = 0;
+
+ ret = skb2rbd_attach(priv, w_idx);
+ if (ret) {
+ pr_err("failed to allocate new rx_skb[%d]\n",
+ w_idx);
+ break;
+ }
}
- writel(priv->hw_txproc_wr_ptr,
- PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
+ processed++;
}
+rx_out:
if (processed < budget) {
napi_complete(napi);
qtnf_en_rxdone_irq(priv);
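The rewritten poll loop decouples the read index (consumption) from the write index (refill): a bad or missing buffer is skipped rather than stalling the ring, and the inner loop re-attaches fresh buffers for as long as CIRC_SPACE() reports room, so one failed allocation only shrinks the refill batch. A compressed userspace model of that structure; alloc_buf() is a hypothetical stand-in for skb2rbd_attach(), and the descriptor done-bit test from qtnf_rx_data_ready() is approximated here with CIRC_CNT() on the indices:

#include <stdbool.h>
#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define RING 8u

static unsigned int r_idx, w_idx = RING - 1; /* ring starts fully filled */

static bool alloc_buf(unsigned int idx) /* hypothetical skb2rbd_attach() */
{
	(void)idx;
	return true; /* pretend allocation always succeeds */
}

static int rx_poll(int budget)
{
	int processed = 0;

	while (processed < budget && CIRC_CNT(w_idx, r_idx, RING)) {
		/* ... hand buffer r_idx up the stack ... */
		r_idx = (r_idx + 1) & (RING - 1);

		/* refill while there is room, independent of r_idx */
		while (CIRC_SPACE(w_idx, r_idx, RING)) {
			unsigned int next = (w_idx + 1) & (RING - 1);

			if (!alloc_buf(next))
				break; /* retry on the next poll */
			w_idx = next;
		}
		processed++;
	}
	return processed;
}

int main(void)
{
	printf("processed %d\n", rx_poll(4));
	return 0;
}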
@@ -1058,11 +1126,8 @@ static int qtnf_bringup_fw(struct qtnf_bus *bus)
static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
struct qtnf_pcie_bus_priv *priv = (void *)data;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_lock, flags);
qtnf_pcie_data_tx_reclaim(priv);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
qtnf_en_txdone_irq(priv);
}
@@ -1090,10 +1155,22 @@ static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
struct qtnf_bus *bus = dev_get_drvdata(s->private);
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ u32 status;
seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
+ status = reg & PCIE_HDP_INT_TX_BITS;
+ seq_printf(s, "pcie_irq_tx_status(%s)\n",
+ (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);
+ status = reg & PCIE_HDP_INT_RX_BITS;
+ seq_printf(s, "pcie_irq_rx_status(%s)\n",
+ (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
+ seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count);
+ status = reg & PCIE_HDP_INT_HHBM_UF;
+ seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
+ (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
return 0;
}
@@ -1107,10 +1184,24 @@ static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
- seq_printf(s, "tx_bd_reclaim_start(%u)\n", priv->tx_bd_reclaim_start);
- seq_printf(s, "tx_bd_index(%u)\n", priv->tx_bd_index);
- seq_printf(s, "rx_bd_index(%u)\n", priv->rx_bd_index);
- seq_printf(s, "tx_queue_len(%u)\n", priv->tx_queue_len);
+
+ seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
+ seq_printf(s, "tx_bd_p_index(%u)\n",
+ readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
+ & (priv->tx_bd_num - 1));
+ seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
+ seq_printf(s, "tx queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+
+ seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
+ seq_printf(s, "rx_bd_p_index(%u)\n",
+ readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base))
+ & (priv->rx_bd_num - 1));
+ seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
+ seq_printf(s, "rx alloc queue len(%u)\n",
+ CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num));
return 0;
}
@@ -1157,7 +1248,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
init_completion(&bus->request_firmware_complete);
mutex_init(&bus->bus_lock);
spin_lock_init(&pcie_priv->irq_lock);
- spin_lock_init(&pcie_priv->tx_lock);
+ spin_lock_init(&pcie_priv->tx_reclaim_lock);
/* init stats */
pcie_priv->tx_full_count = 0;
@@ -1165,6 +1256,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->pcie_irq_rx_count = 0;
pcie_priv->pcie_irq_tx_count = 0;
+ pcie_priv->pcie_irq_uf_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
@@ -1191,6 +1283,16 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pr_debug("successful init of PCI device %x\n", pdev->device);
}
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+#else
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+#endif
+ if (ret) {
+ pr_err("PCIE DMA coherent mask init failed\n");
+ goto err_base;
+ }
+
pcim_pin_device(pdev);
pci_set_master(pdev);
@@ -1212,12 +1314,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_base;
}
- ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32));
- if (ret) {
- pr_err("PCIE DMA mask init failed\n");
- goto err_base;
- }
-
ret = devm_add_action(&pdev->dev, free_xfer_buffers, (void *)pcie_priv);
if (ret) {
pr_err("custom release callback init failed\n");
@@ -1336,7 +1432,7 @@ static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
qtnf_pcie_resume);
#endif
-static struct pci_device_id qtnf_pcie_devid_table[] = {
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
{
PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,