Diffstat (limited to 'drivers/net/wireless/ath/ath10k/pci.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 1636
1 file changed, 696 insertions, 940 deletions
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3376963a4862..59e0ea83be50 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -44,13 +44,9 @@ enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_WARM_ONLY = 1,
};
-static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
-module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
-MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
-
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
@@ -68,13 +64,7 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{0}
};
-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
- u32 *data);
-
-static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
- int num);
-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
@@ -156,79 +146,175 @@ static const struct ce_attr host_ce_config_wlan[] = {
static const struct ce_pipe_config target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
- .pipenum = 0,
- .pipedir = PIPEDIR_OUT,
- .nentries = 32,
- .nbytes_max = 256,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
- .pipenum = 1,
- .pipedir = PIPEDIR_IN,
- .nentries = 32,
- .nbytes_max = 512,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(512),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
- .pipenum = 2,
- .pipedir = PIPEDIR_IN,
- .nentries = 32,
- .nbytes_max = 2048,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
- .pipenum = 3,
- .pipedir = PIPEDIR_OUT,
- .nentries = 32,
- .nbytes_max = 2048,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
- .pipenum = 4,
- .pipedir = PIPEDIR_OUT,
- .nentries = 256,
- .nbytes_max = 256,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* NB: 50% of src nentries, since tx has 2 frags */
/* CE5: unused */
{
- .pipenum = 5,
- .pipedir = PIPEDIR_OUT,
- .nentries = 32,
- .nbytes_max = 2048,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
- .pipenum = 6,
- .pipedir = PIPEDIR_INOUT,
- .nentries = 32,
- .nbytes_max = 4096,
- .flags = CE_ATTR_FLAGS,
- .reserved = 0,
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(4096),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
};
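
The CE pipe fields above are now stored little-endian because this array is copied byte-for-byte into target memory for the firmware to consume, so the host must pre-convert every word. A minimal sketch of that copy, mirroring what ath10k_pci_init_config() does further down in this patch:

	/* pipe_cfg_targ_addr was read back from the target's pcie_state */
	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0)
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);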
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
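Each entry in the table above is three little-endian words. A sketch of the assumed struct service_to_pipe layout (declared in pci.h, not shown in this hunk):

	struct service_to_pipe {
		__le32 service_id;
		__le32 pipedir;
		__le32 pipenum;
	};

Like target_ce_config_wlan[], the table is written verbatim to target memory (see the svc_to_pipe_map write in ath10k_pci_init_config() below), which is why every field is pre-converted with __cpu_to_le32().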
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -254,8 +340,8 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
/* IMPORTANT: this extra read transaction is required to
* flush the posted write buffer. */
- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS);
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
}
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
@@ -266,48 +352,116 @@ static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
/* IMPORTANT: this extra read transaction is required to
* flush the posted write buffer. */
- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS);
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
}
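
The read-back called out in the comment is the usual way to flush a posted PCIe write. A minimal sketch of the pattern, assuming ath10k_pci_write32()/ath10k_pci_read32() are thin iowrite32()/ioread32() wrappers over the mapped BAR:

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* Reading from the same region forces the write out of the posting
	 * buffers before the caller relies on interrupts being unmasked. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);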
-static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
+static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
- struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- if (ar_pci->num_msi_intrs == 0) {
- if (!ath10k_pci_irq_pending(ar))
- return IRQ_NONE;
-
- ath10k_pci_disable_and_clear_legacy_irq(ar);
- }
+ if (ar_pci->num_msi_intrs > 1)
+ return "msi-x";
- tasklet_schedule(&ar_pci->early_irq_tasklet);
+ if (ar_pci->num_msi_intrs == 1)
+ return "msi";
- return IRQ_HANDLED;
+ return "legacy";
}
-static int ath10k_pci_request_early_irq(struct ath10k *ar)
+static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
+ struct ath10k *ar = pipe->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
int ret;
- /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
- * interrupt from irq vector is triggered in all cases for FW
- * indication/errors */
- ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
- IRQF_SHARED, "ath10k_pci (early)", ar);
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map pci rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_CB(skb)->paddr = paddr;
+
+ ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) {
- ath10k_warn("failed to request early irq: %d\n", ret);
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
-static void ath10k_pci_free_early_irq(struct ath10k *ar)
+static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ while (num--) {
+ ret = __ath10k_pci_rx_post_buf(pipe);
+ if (ret) {
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ mod_timer(&ar_pci->rx_post_retry, jiffies +
+ ATH10K_PCI_RX_POST_RETRY_MS);
+ break;
+ }
+ }
+}
+
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ __ath10k_pci_rx_post_pipe(pipe);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static void ath10k_pci_rx_post(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ for (i = 0; i < CE_COUNT; i++)
+ __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
- free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
+ struct ath10k *ar = (void *)ptr;
+
+ ath10k_pci_rx_post(ar);
}
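
The retry timer fired into ath10k_pci_rx_replenish_retry() has to be armed once per device. A sketch of the assumed setup in ath10k_pci_probe() (that hunk is not shown here):

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

When a buffer post fails, __ath10k_pci_rx_post_pipe() re-arms it with mod_timer(), and ath10k_pci_kill_tasklet() tears it down with del_timer_sync().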
/*
@@ -331,25 +485,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
- /*
- * This code cannot handle reads to non-memory space. Redirect to the
- * register read fn but preserve the multi word read capability of
- * this fn
- */
- if (address < DRAM_BASE_ADDRESS) {
- if (!IS_ALIGNED(address, 4) ||
- !IS_ALIGNED((unsigned long)data, 4))
- return -EIO;
-
- while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
- ar, address, (u32 *)data)) == 0)) {
- nbytes -= sizeof(u32);
- address += sizeof(u32);
- data += sizeof(u32);
- }
- return ret;
- }
-
ce_diag = ar_pci->ce_diag;
/*
@@ -376,7 +511,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
+ ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
if (ret != 0)
goto done;
@@ -389,13 +524,11 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* convert it from Target CPU virtual address space
* to CE address space
*/
- ath10k_pci_wake(ar);
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
address);
- ath10k_pci_sleep(ar);
ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
- 0);
+ 0);
if (ret)
goto done;
@@ -415,7 +548,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != (u32) address) {
+ if (buf != (u32)address) {
ret = -EIO;
goto done;
}
@@ -448,15 +581,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
done:
- if (ret == 0) {
- /* Copy data from allocated DMA buf to caller's buf */
- WARN_ON_ONCE(orig_nbytes & 3);
- for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
- ((u32 *)data)[i] =
- __le32_to_cpu(((__le32 *)data_buf)[i]);
- }
- } else
- ath10k_warn("failed to read diag value at 0x%x: %d\n",
+ if (ret == 0)
+ memcpy(data, data_buf, orig_nbytes);
+ else
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
address, ret);
if (data_buf)
@@ -466,20 +594,45 @@ done:
return ret;
}
-/* Read 4-byte aligned data from Target memory or register */
-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
- u32 *data)
+static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
- /* Assume range doesn't cross this boundary */
- if (address >= DRAM_BASE_ADDRESS)
- return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
+ __le32 val = 0;
+ int ret;
+
+ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
+ *value = __le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
+ u32 src, u32 len)
+{
+ u32 host_addr, addr;
+ int ret;
+
+ host_addr = host_interest_item_address(src);
+
+ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
+ src, ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
+ addr, len, ret);
+ return ret;
+ }
- ath10k_pci_wake(ar);
- *data = ath10k_pci_read32(ar, address);
- ath10k_pci_sleep(ar);
return 0;
}
+#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
+ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
+
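The helper hides the two-step read (fetch the host-interest pointer, then copy the memory it points at). A short usage sketch, mirroring ath10k_pci_dump_registers() below:

	__le32 regs[REG_DUMP_COUNT_QCA988X];

	ret = ath10k_pci_diag_read_hi(ar, &regs[0], hi_failure_state,
				      sizeof(regs));
	if (ret)
		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);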
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
const void *data, int nbytes)
{
@@ -514,9 +667,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
/* Copy caller's data to allocated DMA buf */
- WARN_ON_ONCE(orig_nbytes & 3);
- for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
- ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
+ memcpy(data_buf, data, orig_nbytes);
/*
* The address supplied by the caller is in the
@@ -528,9 +679,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* to
* CE address space
*/
- ath10k_pci_wake(ar);
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
- ath10k_pci_sleep(ar);
remaining_bytes = orig_nbytes;
ce_data = ce_data_base;
@@ -539,7 +688,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
+ ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
if (ret != 0)
goto done;
@@ -547,7 +696,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
+ ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -608,66 +757,34 @@ done:
}
if (ret != 0)
- ath10k_warn("failed to write diag value at 0x%x: %d\n",
+ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
return ret;
}
-/* Write 4B data to Target memory or register */
-static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
- u32 data)
+static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
- /* Assume range doesn't cross this boundary */
- if (address >= DRAM_BASE_ADDRESS)
- return ath10k_pci_diag_write_mem(ar, address, &data,
- sizeof(u32));
+ __le32 val = __cpu_to_le32(value);
- ath10k_pci_wake(ar);
- ath10k_pci_write32(ar, address, data);
- ath10k_pci_sleep(ar);
- return 0;
+ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
-static bool ath10k_pci_target_is_awake(struct ath10k *ar)
+static bool ath10k_pci_is_awake(struct ath10k *ar)
{
- void __iomem *mem = ath10k_pci_priv(ar)->mem;
- u32 val;
- val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
- RTC_STATE_ADDRESS);
- return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
+ u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
-int ath10k_do_pci_wake(struct ath10k *ar)
+static int ath10k_pci_wake_wait(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *pci_addr = ar_pci->mem;
int tot_delay = 0;
int curr_delay = 5;
- if (atomic_read(&ar_pci->keep_awake_count) == 0) {
- /* Force AWAKE */
- iowrite32(PCIE_SOC_WAKE_V_MASK,
- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
- }
- atomic_inc(&ar_pci->keep_awake_count);
-
- if (ar_pci->verified_awake)
- return 0;
-
- for (;;) {
- if (ath10k_pci_target_is_awake(ar)) {
- ar_pci->verified_awake = true;
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar))
return 0;
- }
-
- if (tot_delay > PCIE_WAKE_TIMEOUT) {
- ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
- PCIE_WAKE_TIMEOUT,
- atomic_read(&ar_pci->keep_awake_count));
- return -ETIMEDOUT;
- }
udelay(curr_delay);
tot_delay += curr_delay;
@@ -675,20 +792,21 @@ int ath10k_do_pci_wake(struct ath10k *ar)
if (curr_delay < 50)
curr_delay += 5;
}
+
+ return -ETIMEDOUT;
}
-void ath10k_do_pci_sleep(struct ath10k *ar)
+static int ath10k_pci_wake(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- void __iomem *pci_addr = ar_pci->mem;
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
+ PCIE_SOC_WAKE_V_MASK);
+ return ath10k_pci_wake_wait(ar);
+}
- if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
- /* Allow sleep */
- ar_pci->verified_awake = false;
- iowrite32(PCIE_SOC_WAKE_RESET,
- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
- PCIE_SOC_WAKE_ADDRESS);
- }
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
+ PCIE_SOC_WAKE_RESET);
}
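
With the keep_awake_count refcounting gone, ath10k_pci_wake()/ath10k_pci_sleep() are plain force-awake/allow-sleep toggles. A minimal usage sketch, assuming callers elsewhere in the file bracket register access around power-up roughly like this:

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* ... MMIO access while the SoC is held awake ... */

	ath10k_pci_sleep(ar);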
/* Called by lower (CE) layer when a send to Target completes. */
@@ -726,19 +844,17 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
- int err, num_replenish = 0;
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
- num_replenish++;
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -748,12 +864,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
cb->rx_completion(ar, skb, pipe_info->pipe_num);
}
- err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
- if (unlikely(err)) {
- /* FIXME: retry */
- ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
- pipe_info->pipe_num, num_replenish, err);
- }
+ ath10k_pci_rx_post_pipe(pipe_info);
}
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -781,10 +892,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
}
for (i = 0; i < n_items - 1; i++) {
- ath10k_dbg(ATH10K_DBG_PCI,
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
err = ath10k_ce_send_nolock(ce_pipe,
@@ -799,10 +910,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
/* `i` is equal to `n_items -1` after for() */
- ath10k_dbg(ATH10K_DBG_PCI,
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
err = ath10k_ce_send_nolock(ce_pipe,
@@ -829,52 +940,64 @@ static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
-static void ath10k_pci_hif_dump_area(struct ath10k *ar)
+static void ath10k_pci_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
{
- u32 reg_dump_area = 0;
- u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
- u32 host_addr;
- int ret;
- u32 i;
+ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
- ath10k_err("firmware crashed!\n");
- ath10k_err("hardware name %s version 0x%x\n",
- ar->hw_params.name, ar->target_version);
- ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
+ lockdep_assert_held(&ar->data_lock);
- host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
- ret = ath10k_pci_diag_read_mem(ar, host_addr,
- &reg_dump_area, sizeof(u32));
+ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
+ hi_failure_state,
+ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
if (ret) {
- ath10k_err("failed to read FW dump area address: %d\n", ret);
- return;
- }
-
- ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
-
- ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
- &reg_dump_values[0],
- REG_DUMP_COUNT_QCA988X * sizeof(u32));
- if (ret != 0) {
- ath10k_err("failed to read FW dump area: %d\n", ret);
+ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
return;
}
BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
- ath10k_err("target Register Dump\n");
+ ath10k_err(ar, "firmware register dump:\n");
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
- ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
i,
- reg_dump_values[i],
- reg_dump_values[i + 1],
- reg_dump_values[i + 2],
- reg_dump_values[i + 3]);
+ __le32_to_cpu(reg_dump_values[i]),
+ __le32_to_cpu(reg_dump_values[i + 1]),
+ __le32_to_cpu(reg_dump_values[i + 2]),
+ __le32_to_cpu(reg_dump_values[i + 3]));
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
+ crash_data->registers[i] = reg_dump_values[i];
+}
+
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char uuid[50];
+
+ spin_lock_bh(&ar->data_lock);
+
+ crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+
+ if (crash_data)
+ scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
+ else
+ scnprintf(uuid, sizeof(uuid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
+ ath10k_print_driver_info(ar);
+ ath10k_pci_dump_registers(ar, crash_data);
+
+ spin_unlock_bh(&ar->data_lock);
queue_work(ar->workqueue, &ar->restart_work);
}
@@ -882,7 +1005,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
int resources;
@@ -910,43 +1033,12 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
}
-static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- const struct ce_attr *attr;
- struct ath10k_pci_pipe *pipe_info;
- int pipe_num, disable_interrupts;
-
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
-
- /* Handle Diagnostic CE specially */
- if (pipe_info->ce_hdl == ar_pci->ce_diag)
- continue;
-
- attr = &host_ce_config_wlan[pipe_num];
-
- if (attr->src_nentries) {
- disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_send_done,
- disable_interrupts);
- }
-
- if (attr->dest_nentries)
- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
- ath10k_pci_ce_recv_data);
- }
-
- return 0;
-}
-
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -954,82 +1046,72 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
tasklet_kill(&ar_pci->intr_tq);
tasklet_kill(&ar_pci->msi_fw_err);
- tasklet_kill(&ar_pci->early_irq_tasklet);
for (i = 0; i < CE_COUNT; i++)
tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+ del_timer_sync(&ar_pci->rx_post_retry);
}
-/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id, u8 *ul_pipe,
u8 *dl_pipe, int *ul_is_polled,
int *dl_is_polled)
{
- int ret = 0;
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
/* polling for received messages not supported */
*dl_is_polled = 0;
- switch (service_id) {
- case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
- /*
- * Host->target HTT gets its own pipe, so it can be polled
- * while other pipes are interrupt driven.
- */
- *ul_pipe = 4;
- /*
- * Use the same target->host pipe for HTC ctrl, HTC raw
- * streams, and HTT.
- */
- *dl_pipe = 1;
- break;
-
- case ATH10K_HTC_SVC_ID_RSVD_CTRL:
- case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
- /*
- * Note: HTC_RAW_STREAMS_SVC is currently unused, and
- * HTC_CTRL_RSVD_SVC could share the same pipe as the
- * WMI services. So, if another CE is needed, change
- * this to *ul_pipe = 3, which frees up CE 0.
- */
- /* *ul_pipe = 3; */
- *ul_pipe = 0;
- *dl_pipe = 1;
- break;
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
- case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
- case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
- case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
- case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
- case ATH10K_HTC_SVC_ID_WMI_CONTROL:
- *ul_pipe = 3;
- *dl_pipe = 2;
- break;
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
- /* pipe 5 unused */
- /* pipe 6 reserved */
- /* pipe 7 reserved */
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
- default:
- ret = -1;
- break;
- }
*ul_is_polled =
(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
- return ret;
+ return 0;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
- u8 *ul_pipe, u8 *dl_pipe)
+ u8 *ul_pipe, u8 *dl_pipe)
{
int ul_is_polled, dl_is_polled;
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
@@ -1039,141 +1121,34 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
- int num)
+static void ath10k_pci_irq_disable(struct ath10k *ar)
{
- struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
- struct sk_buff *skb;
- dma_addr_t ce_data;
- int i, ret = 0;
-
- if (pipe_info->buf_sz == 0)
- return 0;
-
- for (i = 0; i < num; i++) {
- skb = dev_alloc_skb(pipe_info->buf_sz);
- if (!skb) {
- ath10k_warn("failed to allocate skbuff for pipe %d\n",
- num);
- ret = -ENOMEM;
- goto err;
- }
-
- WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
-
- ce_data = dma_map_single(ar->dev, skb->data,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
- ath10k_warn("failed to DMA map sk_buff\n");
- dev_kfree_skb_any(skb);
- ret = -EIO;
- goto err;
- }
-
- ATH10K_SKB_CB(skb)->paddr = ce_data;
-
- pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
- pipe_info->buf_sz,
- PCI_DMA_FROMDEVICE);
-
- ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
- ce_data);
- if (ret) {
- ath10k_warn("failed to enqueue to pipe %d: %d\n",
- num, ret);
- goto err;
- }
- }
+ int i;
- return ret;
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ /* FIXME: How to mask all MSI interrupts? */
-err:
- ath10k_pci_rx_pipe_cleanup(pipe_info);
- return ret;
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ synchronize_irq(ar_pci->pdev->irq + i);
}
-static int ath10k_pci_post_rx(struct ath10k *ar)
+static void ath10k_pci_irq_enable(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- const struct ce_attr *attr;
- int pipe_num, ret = 0;
-
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- attr = &host_ce_config_wlan[pipe_num];
-
- if (attr->dest_nentries == 0)
- continue;
-
- ret = ath10k_pci_post_rx_pipe(pipe_info,
- attr->dest_nentries - 1);
- if (ret) {
- ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
- pipe_num, ret);
-
- for (; pipe_num >= 0; pipe_num--) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- ath10k_pci_rx_pipe_cleanup(pipe_info);
- }
- return ret;
- }
- }
-
- return 0;
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+ /* FIXME: How to unmask all MSI interrupts? */
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret, ret_early;
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
- ath10k_pci_free_early_irq(ar);
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_irq_enable(ar);
+ ath10k_pci_rx_post(ar);
- ret = ath10k_pci_request_irq(ar);
- if (ret) {
- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
- ret);
- goto err_early_irq;
- }
-
- ret = ath10k_pci_setup_ce_irq(ar);
- if (ret) {
- ath10k_warn("failed to setup CE interrupts: %d\n", ret);
- goto err_stop;
- }
-
- /* Post buffers once to start things off. */
- ret = ath10k_pci_post_rx(ar);
- if (ret) {
- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
- ret);
- goto err_stop;
- }
-
- ar_pci->started = 1;
return 0;
-
-err_stop:
- ath10k_ce_disable_interrupts(ar);
- ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
-err_early_irq:
- /* Though there should be no interrupts (device was reset)
- * power_down() expects the early IRQ to be installed as per the
- * driver lifecycle. */
- ret_early = ath10k_pci_request_early_irq(ar);
- if (ret_early)
- ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
-
- return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
@@ -1193,10 +1168,6 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
ar = pipe_info->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
-
- if (!ar_pci->started)
- return;
-
ce_hdl = pipe_info->ce_hdl;
while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
@@ -1227,10 +1198,6 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
ar = pipe_info->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
-
- if (!ar_pci->started)
- return;
-
ce_hdl = pipe_info->ce_hdl;
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
@@ -1275,41 +1242,31 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
ath10k_ce_deinit_pipe(ar, i);
}
-static void ath10k_pci_hif_stop(struct ath10k *ar)
+static void ath10k_pci_flush(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret;
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
-
- if (WARN_ON(!ar_pci->started))
- return;
-
- ret = ath10k_ce_disable_interrupts(ar);
- if (ret)
- ath10k_warn("failed to disable CE interrupts: %d\n", ret);
-
- ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
-
- ret = ath10k_pci_request_early_irq(ar);
- if (ret)
- ath10k_warn("failed to re-enable early irq: %d\n", ret);
-
- /* At this point, asynchronous threads are stopped, the target should
- * not DMA nor interrupt. We process the leftovers and then free
- * everything else up. */
-
ath10k_pci_buffer_cleanup(ar);
+}
- /* Make the sure the device won't access any structures on the host by
- * resetting it. The device was fed with PCI CE ringbuffer
- * configuration during init. If ringbuffers are freed and the device
- * were to access them this could lead to memory corruption on the
- * host. */
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ /* Most likely the device has HTT Rx ring configured. The only way to
+ * prevent the device from accessing (and possibly corrupting) host
+ * memory is to reset the chip now.
+ *
+ * There's also no known way of masking MSI interrupts on the device.
+ * For ranged MSI the CE-related interrupts can be masked. However
+ * regardless how many MSI interrupts are assigned the first one
+ * is always used for firmware indications (crashes) and cannot be
+ * masked. To prevent the device from asserting the interrupt reset it
+ * before proceeding with cleanup.
+ */
ath10k_pci_warm_reset(ar);
- ar_pci->started = 0;
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_flush(ar);
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1360,7 +1317,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
xfer.wait_for_resp = true;
xfer.resp_len = 0;
- ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
+ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
}
ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
@@ -1418,6 +1375,7 @@ static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
+ struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
@@ -1429,7 +1387,7 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
return;
if (!xfer->wait_for_resp) {
- ath10k_warn("unexpected: BMI data received; ignoring\n");
+ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
return;
}
@@ -1457,129 +1415,17 @@ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
}
/*
- * Map from service/endpoint to Copy Engine.
- * This table is derived from the CE_PCI TABLE, above.
- * It is passed to the Target at startup for use by firmware.
- */
-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_CONTROL,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- ATH10K_HTC_SVC_ID_WMI_CONTROL,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- ATH10K_HTC_SVC_ID_RSVD_CTRL,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 0, /* could be moved to 3 (share with WMI) */
- },
- {
- ATH10K_HTC_SVC_ID_RSVD_CTRL,
- PIPEDIR_IN, /* in = DL = target -> host */
- 1,
- },
- {
- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
- PIPEDIR_OUT, /* out = UL = host -> target */
- 0,
- },
- {
- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
- PIPEDIR_IN, /* in = DL = target -> host */
- 1,
- },
- {
- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 4,
- },
- {
- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
- PIPEDIR_IN, /* in = DL = target -> host */
- 1,
- },
-
- /* (Additions here) */
-
- { /* Must be last */
- 0,
- 0,
- 0,
- },
-};
-
-/*
* Send an interrupt to the device to wake up the Target CPU
* so it has an opportunity to notice any changed state.
*/
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
- int ret;
- u32 core_ctrl;
+ u32 addr, val;
- ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
- CORE_CTRL_ADDRESS,
- &core_ctrl);
- if (ret) {
- ath10k_warn("failed to read core_ctrl: %d\n", ret);
- return ret;
- }
-
- /* A_INUM_FIRMWARE interrupt to Target CPU */
- core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
-
- ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
- CORE_CTRL_ADDRESS,
- core_ctrl);
- if (ret) {
- ath10k_warn("failed to set target CPU interrupt mask: %d\n",
- ret);
- return ret;
- }
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_pci_read32(ar, addr);
+ val |= CORE_CTRL_CPU_INTR_MASK;
+ ath10k_pci_write32(ar, addr, val);
return 0;
}
@@ -1602,92 +1448,92 @@ static int ath10k_pci_init_config(struct ath10k *ar)
host_interest_item_address(HI_ITEM(hi_interconnect_state));
/* Supply Target-side CE configuration */
- ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
- &pcie_state_targ_addr);
+ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
+ &pcie_state_targ_addr);
if (ret != 0) {
- ath10k_err("Failed to get pcie state addr: %d\n", ret);
+ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
return ret;
}
if (pcie_state_targ_addr == 0) {
ret = -EIO;
- ath10k_err("Invalid pcie state addr\n");
+ ath10k_err(ar, "Invalid pcie state addr\n");
return ret;
}
- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
- pipe_cfg_addr),
- &pipe_cfg_targ_addr);
+ pipe_cfg_addr)),
+ &pipe_cfg_targ_addr);
if (ret != 0) {
- ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
return ret;
}
if (pipe_cfg_targ_addr == 0) {
ret = -EIO;
- ath10k_err("Invalid pipe cfg addr\n");
+ ath10k_err(ar, "Invalid pipe cfg addr\n");
return ret;
}
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
- target_ce_config_wlan,
- sizeof(target_ce_config_wlan));
+ target_ce_config_wlan,
+ sizeof(target_ce_config_wlan));
if (ret != 0) {
- ath10k_err("Failed to write pipe cfg: %d\n", ret);
+ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
return ret;
}
- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
- svc_to_pipe_map),
- &svc_to_pipe_map);
+ svc_to_pipe_map)),
+ &svc_to_pipe_map);
if (ret != 0) {
- ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
return ret;
}
if (svc_to_pipe_map == 0) {
ret = -EIO;
- ath10k_err("Invalid svc_to_pipe map\n");
+ ath10k_err(ar, "Invalid svc_to_pipe map\n");
return ret;
}
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
- target_service_to_ce_map_wlan,
- sizeof(target_service_to_ce_map_wlan));
+ target_service_to_ce_map_wlan,
+ sizeof(target_service_to_ce_map_wlan));
if (ret != 0) {
- ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
return ret;
}
- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
offsetof(struct pcie_state,
- config_flags),
- &pcie_config_flags);
+ config_flags)),
+ &pcie_config_flags);
if (ret != 0) {
- ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
return ret;
}
pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
- ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
- offsetof(struct pcie_state, config_flags),
- &pcie_config_flags,
- sizeof(pcie_config_flags));
+ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ pcie_config_flags);
if (ret != 0) {
- ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
return ret;
}
/* configure early allocation */
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
- ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
+ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err("Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
return ret;
}
@@ -1697,26 +1543,26 @@ static int ath10k_pci_init_config(struct ath10k *ar)
ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
- ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
+ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
if (ret != 0) {
- ath10k_err("Failed to set early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
return ret;
}
/* Tell Target to proceed with initialization */
flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
- ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
+ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
if (ret != 0) {
- ath10k_err("Failed to get option val: %d\n", ret);
+ ath10k_err(ar, "Failed to get option val: %d\n", ret);
return ret;
}
flag2_value |= HI_OPTION_EARLY_CFG_DONE;
- ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
+ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
if (ret != 0) {
- ath10k_err("Failed to set option val: %d\n", ret);
+ ath10k_err(ar, "Failed to set option val: %d\n", ret);
return ret;
}
@@ -1730,7 +1576,7 @@ static int ath10k_pci_alloc_ce(struct ath10k *ar)
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
- ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
@@ -1761,9 +1607,11 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
- ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+ ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
+ ath10k_pci_ce_send_done,
+ ath10k_pci_ce_recv_data);
if (ret) {
- ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
pipe_num, ret);
return ret;
}
@@ -1777,38 +1625,25 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
continue;
}
- pipe_info->buf_sz = (size_t) (attr->src_sz_max);
+ pipe_info->buf_sz = (size_t)(attr->src_sz_max);
}
return 0;
}
-static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
+static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- u32 fw_indicator;
-
- ath10k_pci_wake(ar);
-
- fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
-
- if (fw_indicator & FW_IND_EVENT_PENDING) {
- /* ACK: clear Target-side pending event */
- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
- fw_indicator & ~FW_IND_EVENT_PENDING);
+ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
+ FW_IND_EVENT_PENDING;
+}
- if (ar_pci->started) {
- ath10k_pci_hif_dump_area(ar);
- } else {
- /*
- * Probable Target failure before we're prepared
- * to handle it. Generally unexpected.
- */
- ath10k_warn("early firmware event indicated\n");
- }
- }
+static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+{
+ u32 val;
- ath10k_pci_sleep(ar);
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ val &= ~FW_IND_EVENT_PENDING;
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}
/* this function effectively clears target memory controller assert line */
@@ -1833,25 +1668,19 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
- int ret = 0;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
-
- ret = ath10k_do_pci_wake(ar);
- if (ret) {
- ath10k_err("failed to wake up target: %d\n", ret);
- return ret;
- }
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
+ val);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
/* disable pending irqs */
@@ -1894,11 +1723,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
+ val);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
/* CPU warm reset */
@@ -1909,20 +1739,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
+ val);
msleep(100);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
- ath10k_do_pci_sleep(ar);
- return ret;
+ return 0;
}
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- const char *irq_mode;
int ret;
/*
@@ -1941,80 +1769,39 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
ret = ath10k_pci_warm_reset(ar);
if (ret) {
- ath10k_err("failed to reset target: %d\n", ret);
+ ath10k_err(ar, "failed to reset target: %d\n", ret);
goto err;
}
- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- /* Force AWAKE forever */
- ath10k_do_pci_wake(ar);
-
ret = ath10k_pci_ce_init(ar);
if (ret) {
- ath10k_err("failed to initialize CE: %d\n", ret);
- goto err_ps;
- }
-
- ret = ath10k_ce_disable_interrupts(ar);
- if (ret) {
- ath10k_err("failed to disable CE interrupts: %d\n", ret);
- goto err_ce;
- }
-
- ret = ath10k_pci_init_irq(ar);
- if (ret) {
- ath10k_err("failed to init irqs: %d\n", ret);
- goto err_ce;
- }
-
- ret = ath10k_pci_request_early_irq(ar);
- if (ret) {
- ath10k_err("failed to request early irq: %d\n", ret);
- goto err_deinit_irq;
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
- ath10k_err("failed to wait for target to init: %d\n", ret);
- goto err_free_early_irq;
+ ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
+ goto err_ce;
}
ret = ath10k_pci_init_config(ar);
if (ret) {
- ath10k_err("failed to setup init config: %d\n", ret);
- goto err_free_early_irq;
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce;
}
ret = ath10k_pci_wake_target_cpu(ar);
if (ret) {
- ath10k_err("could not wake up target CPU: %d\n", ret);
- goto err_free_early_irq;
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce;
}
- if (ar_pci->num_msi_intrs > 1)
- irq_mode = "MSI-X";
- else if (ar_pci->num_msi_intrs == 1)
- irq_mode = "MSI";
- else
- irq_mode = "legacy";
-
- if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
- irq_mode, ath10k_pci_irq_mode,
- ath10k_pci_reset_mode);
-
return 0;
-err_free_early_irq:
- ath10k_pci_free_early_irq(ar);
-err_deinit_irq:
- ath10k_pci_deinit_irq(ar);
err_ce:
ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
-err_ps:
- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_sleep(ar);
err:
return ret;
}
@@ -2034,7 +1821,7 @@ static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
if (ret == 0)
break;
- ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
+ ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
}
@@ -2045,7 +1832,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
@@ -2057,17 +1844,17 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/
ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
- ath10k_warn("failed to power up target using warm reset: %d\n",
+ ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
ret);
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
return ret;
- ath10k_warn("trying cold reset\n");
+ ath10k_warn(ar, "trying cold reset\n");
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
- ath10k_err("failed to power up target using cold reset too (%d)\n",
+ ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
ret);
return ret;
}
@@ -2078,18 +1865,9 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
- ath10k_pci_free_early_irq(ar);
- ath10k_pci_kill_tasklet(ar);
- ath10k_pci_deinit_irq(ar);
- ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
-
- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
- ath10k_do_pci_sleep(ar);
}
#ifdef CONFIG_PM
@@ -2171,7 +1949,13 @@ static void ath10k_msi_err_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
- ath10k_pci_fw_interrupt_handler(ar);
+ if (!ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
+ return;
+ }
+
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
}
/*
@@ -2185,7 +1969,8 @@ static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
return IRQ_HANDLED;
}
@@ -2232,36 +2017,17 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void ath10k_pci_early_irq_tasklet(unsigned long data)
+static void ath10k_pci_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
- u32 fw_ind;
- int ret;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_warn("failed to wake target in early irq tasklet: %d\n",
- ret);
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
return;
}
- fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
- if (fw_ind & FW_IND_EVENT_PENDING) {
- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
- fw_ind & ~FW_IND_EVENT_PENDING);
- ath10k_pci_hif_dump_area(ar);
- }
-
- ath10k_pci_sleep(ar);
- ath10k_pci_enable_legacy_irq(ar);
-}
-
-static void ath10k_pci_tasklet(unsigned long data)
-{
- struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
ath10k_ce_per_engine_service_any(ar);
/* Re-enable legacy irq that was disabled in the irq handler */
@@ -2278,7 +2044,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar)
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
return ret;
}
@@ -2288,7 +2054,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar)
ath10k_pci_per_engine_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
ar_pci->pdev->irq + i, ret);
for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
@@ -2311,7 +2077,7 @@ static int ath10k_pci_request_irq_msi(struct ath10k *ar)
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
ar_pci->pdev->irq, ret);
return ret;
}
@@ -2328,7 +2094,7 @@ static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request legacy irq %d: %d\n",
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
ar_pci->pdev->irq, ret);
return ret;
}
@@ -2349,7 +2115,7 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
return ath10k_pci_request_irq_msix(ar);
}
- ath10k_warn("unknown irq configuration upon request\n");
+ ath10k_warn(ar, "unknown irq configuration upon request\n");
return -EINVAL;
}
@@ -2372,8 +2138,6 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
(unsigned long)ar);
- tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
- (unsigned long)ar);
for (i = 0; i < CE_COUNT; i++) {
ar_pci->pipe_info[i].ar_pci = ar_pci;
@@ -2385,21 +2149,19 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
static int ath10k_pci_init_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
- ar_pci->features);
int ret;
ath10k_pci_init_irq_tasklets(ar);
- if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
- !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
+ ath10k_info(ar, "limiting irq mode to: %d\n",
+ ath10k_pci_irq_mode);
/* Try MSI-X */
- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
+ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
- ar_pci->num_msi_intrs);
+ ar_pci->num_msi_intrs);
if (ret > 0)
return 0;
@@ -2426,34 +2188,16 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* synchronization checking. */
ar_pci->num_msi_intrs = 0;
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_warn("failed to wake target: %d\n", ret);
- return ret;
- }
-
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
- ath10k_pci_sleep(ar);
return 0;
}
-static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
- int ret;
-
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_warn("failed to wake target: %d\n", ret);
- return ret;
- }
-
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0);
- ath10k_pci_sleep(ar);
-
- return 0;
}
static int ath10k_pci_deinit_irq(struct ath10k *ar)
@@ -2462,7 +2206,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
switch (ar_pci->num_msi_intrs) {
case 0:
- return ath10k_pci_deinit_irq_legacy(ar);
+ ath10k_pci_deinit_irq_legacy(ar);
+ return 0;
case 1:
/* fall-through */
case MSI_NUM_REQUEST:
@@ -2472,7 +2217,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
pci_disable_msi(ar_pci->pdev);
}
- ath10k_warn("unknown irq configuration upon deinit\n");
+ ath10k_warn(ar, "unknown irq configuration upon deinit\n");
return -EINVAL;
}
@@ -2480,23 +2225,17 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long timeout;
- int ret;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
-
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_err("failed to wake up target for init: %d\n", ret);
- return ret;
- }
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
do {
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+ val);
/* target should never return this */
if (val == 0xffffffff)
@@ -2511,55 +2250,42 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
- PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL);
mdelay(10);
} while (time_before(jiffies, timeout));
if (val == 0xffffffff) {
- ath10k_err("failed to read device register, device is gone\n");
- ret = -EIO;
- goto out;
+ ath10k_err(ar, "failed to read device register, device is gone\n");
+ return -EIO;
}
if (val & FW_IND_EVENT_PENDING) {
- ath10k_warn("device has crashed during init\n");
- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
- val & ~FW_IND_EVENT_PENDING);
- ath10k_pci_hif_dump_area(ar);
- ret = -ECOMM;
- goto out;
+ ath10k_warn(ar, "device has crashed during init\n");
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ return -ECOMM;
}
if (!(val & FW_IND_INITIALIZED)) {
- ath10k_err("failed to receive initialized event from target: %08x\n",
+ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
val);
- ret = -ETIMEDOUT;
- goto out;
+ return -ETIMEDOUT;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
-
-out:
- ath10k_pci_sleep(ar);
- return ret;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
+ return 0;
}
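The wait loop above is the usual poll-a-register-until-a-bit-is-set-or-timeout pattern. A generic helper sketch of that pattern, with purely illustrative names (not part of ath10k or of this diff):

/* Sketch: poll a 32-bit register until (val & mask) == want, the device
 * disappears (all-ones read), or the timeout expires. Assumes the
 * ath10k_pci_read32() accessor shown above; the helper name is hypothetical. */
static int example_poll_reg(struct ath10k *ar, u32 addr, u32 mask, u32 want,
			    unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u32 val;

	do {
		val = ath10k_pci_read32(ar, addr);
		if (val == 0xffffffff)
			return -EIO;		/* device is gone */
		if ((val & mask) == want)
			return 0;
		mdelay(10);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

Called with FW_INDICATOR_ADDRESS and FW_IND_INITIALIZED for both mask and want, this reproduces the success path of the function above, minus the crash check and the legacy-interrupt re-enable.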
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
- int i, ret;
+ int i;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
-
- ret = ath10k_do_pci_wake(ar);
- if (ret) {
- ath10k_err("failed to wake up target: %d\n",
- ret);
- return ret;
- }
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
@@ -2584,169 +2310,199 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
msleep(1);
}
- ath10k_do_pci_sleep(ar);
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
return 0;
}
-static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
-{
- int i;
-
- for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
- if (!test_bit(i, ar_pci->features))
- continue;
-
- switch (i) {
- case ATH10K_PCI_FEATURE_MSI_X:
- ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
- break;
- case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
- ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
- break;
- }
- }
-}
-
-static int ath10k_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_dev)
+static int ath10k_pci_claim(struct ath10k *ar)
{
- void __iomem *mem;
- int ret = 0;
- struct ath10k *ar;
- struct ath10k_pci *ar_pci;
- u32 lcr_val, chip_id;
-
- ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
-
- ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
- if (ar_pci == NULL)
- return -ENOMEM;
-
- ar_pci->pdev = pdev;
- ar_pci->dev = &pdev->dev;
-
- switch (pci_dev->device) {
- case QCA988X_2_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
- break;
- default:
- ret = -ENODEV;
- ath10k_err("Unknown device ID: %d\n", pci_dev->device);
- goto err_ar_pci;
- }
-
- if (ath10k_pci_target_ps)
- set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
-
- ath10k_pci_dump_features(ar_pci);
-
- ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
- if (!ar) {
- ath10k_err("failed to create driver core\n");
- ret = -EINVAL;
- goto err_ar_pci;
- }
-
- ar_pci->ar = ar;
- atomic_set(&ar_pci->keep_awake_count, 0);
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 lcr_val;
+ int ret;
pci_set_drvdata(pdev, ar);
ret = pci_enable_device(pdev);
if (ret) {
- ath10k_err("failed to enable PCI device: %d\n", ret);
- goto err_ar;
+ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
+ return ret;
}
- /* Request MMIO resources */
ret = pci_request_region(pdev, BAR_NUM, "ath");
if (ret) {
- ath10k_err("failed to request MMIO region: %d\n", ret);
+ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
+ ret);
goto err_device;
}
- /*
- * Target structures have a limit of 32 bit DMA pointers.
- * DMA pointers can be wider than 32 bits by default on some systems.
- */
+ /* Target expects 32 bit DMA. Enforce it. */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
+ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
goto err_region;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("failed to set consistent DMA mask to 32-bit\n");
+ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
+ ret);
goto err_region;
}
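As an aside, on kernels that provide dma_set_mask_and_coherent() the two mask calls above collapse into one; a minimal sketch, assuming <linux/dma-mapping.h> is available in this file:

	/* Sketch only: equivalent 32-bit streaming + coherent DMA setup. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
		goto err_region;
	}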
- /* Set bus master bit in PCI_COMMAND to enable DMA */
pci_set_master(pdev);
- /*
- * Temporary FIX: disable ASPM
- * Will be removed after the OTP is programmed
- */
+ /* Workaround: Disable ASPM */
pci_read_config_dword(pdev, 0x80, &lcr_val);
pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
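The raw dword access above clears the whole low byte at config offset 0x80, which on this device is presumably the PCIe Link Control register. If only the ASPM control field were meant, a narrower sketch using the generic capability helpers could look like this (not what the driver does; the driver clears more bits):

	/* Sketch only: clear just the ASPM L0s/L1 enable bits via the PCIe cap API. */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);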
/* Arrange for access to Target SoC registers. */
- mem = pci_iomap(pdev, BAR_NUM, 0);
- if (!mem) {
- ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
+ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
+ if (!ar_pci->mem) {
+ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
ret = -EIO;
goto err_master;
}
- ar_pci->mem = mem;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ return 0;
+
+err_master:
+ pci_clear_master(pdev);
+
+err_region:
+ pci_release_region(pdev, BAR_NUM);
+
+err_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void ath10k_pci_release(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+
+ pci_iounmap(pdev, ar_pci->mem);
+ pci_release_region(pdev, BAR_NUM);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ int ret = 0;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ u32 chip_id;
+
+ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
+ &ath10k_pci_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_pci->pdev = pdev;
+ ar_pci->dev = &pdev->dev;
+ ar_pci->ar = ar;
spin_lock_init(&ar_pci->ce_lock);
+ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
+ (unsigned long)ar);
- ret = ath10k_do_pci_wake(ar);
+ ret = ath10k_pci_claim(ar);
if (ret) {
- ath10k_err("Failed to get chip id: %d\n", ret);
- goto err_iomap;
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_core_destroy;
}
- chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up: %d\n", ret);
+ goto err_release;
+ }
- ath10k_do_pci_sleep(ar);
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ goto err_sleep;
+ }
ret = ath10k_pci_alloc_ce(ar);
if (ret) {
- ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
- goto err_iomap;
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ goto err_sleep;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ ath10k_pci_ce_deinit(ar);
- ret = ath10k_core_register(ar, chip_id);
+ ret = ath10k_ce_disable_interrupts(ar);
if (ret) {
- ath10k_err("failed to register driver core: %d\n", ret);
+ ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
+ ret);
goto err_free_ce;
}
+ /* Workaround: There's no known way to mask all possible interrupts via
+ * device CSR. The only way to make sure device doesn't assert
+ * interrupts is to reset it. Interrupts are then disabled on host
+ * after handlers are registered.
+ */
+ ath10k_pci_warm_reset(ar);
+
+ ret = ath10k_pci_init_irq(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
+ goto err_free_ce;
+ }
+
+ ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ /* This shouldn't race as the device has been reset above. */
+ ath10k_pci_irq_disable(ar);
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_irq;
+ }
+
return 0;
+err_free_irq:
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+
+err_deinit_irq:
+ ath10k_pci_deinit_irq(ar);
+
err_free_ce:
ath10k_pci_free_ce(ar);
-err_iomap:
- pci_iounmap(pdev, mem);
-err_master:
- pci_clear_master(pdev);
-err_region:
- pci_release_region(pdev, BAR_NUM);
-err_device:
- pci_disable_device(pdev);
-err_ar:
+
+err_sleep:
+ ath10k_pci_sleep(ar);
+
+err_release:
+ ath10k_pci_release(ar);
+
+err_core_destroy:
ath10k_core_destroy(ar);
-err_ar_pci:
- /* call HIF PCI free here */
- kfree(ar_pci);
return ret;
}
@@ -2756,7 +2512,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
- ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
@@ -2767,15 +2523,14 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
return;
ath10k_core_unregister(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_ce_deinit(ar);
ath10k_pci_free_ce(ar);
-
- pci_iounmap(pdev, ar_pci->mem);
- pci_release_region(pdev, BAR_NUM);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-
+ ath10k_pci_sleep(ar);
+ ath10k_pci_release(ar);
ath10k_core_destroy(ar);
- kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
@@ -2793,7 +2548,8 @@ static int __init ath10k_pci_init(void)
ret = pci_register_driver(&ath10k_pci_driver);
if (ret)
- ath10k_err("failed to register PCI driver: %d\n", ret);
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+ ret);
return ret;
}
@@ -2809,5 +2565,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);