Diffstat (limited to 'drivers/net/wireless/ath/ath10k/pci.c')
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 271
1 file changed, 94 insertions(+), 177 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index b3cff1d3364a..8133d7b5b956 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -33,12 +33,6 @@
#include "ce.h"
#include "pci.h"
-enum ath10k_pci_irq_mode {
- ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
- ATH10K_PCI_IRQ_MSI = 2,
-};
-
enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -745,10 +739,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- if (ar_pci->num_msi_intrs > 1)
- return "msi-x";
-
- if (ar_pci->num_msi_intrs == 1)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
return "msi";
return "legacy";
@@ -809,7 +800,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
spin_lock_bh(&ar_pci->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
- while (num--) {
+
+ while (num >= 0) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
if (ret == -ENOSPC)
@@ -819,6 +811,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ATH10K_PCI_RX_POST_RETRY_MS);
break;
}
+ num--;
}
}
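The rewritten loop above posts num + 1 buffers and relies on -ENOSPC from __ath10k_pci_rx_post_buf() to stop once the ring is actually full, which tolerates a stale free-count snapshot taken outside the lock. A minimal user-space model of that control flow (all names here are illustrative, not driver code):

    #include <errno.h>
    #include <stdio.h>

    #define RING_SIZE 8

    static int filled;                      /* buffers currently posted */

    /* stand-in for __ath10k_pci_rx_post_buf() */
    static int post_buf(void)
    {
            if (filled == RING_SIZE)
                    return -ENOSPC;         /* ring full: expected stop condition */
            filled++;
            return 0;
    }

    int main(void)
    {
            int num = RING_SIZE - filled;   /* snapshot of the free count */

            while (num >= 0) {              /* num + 1 attempts */
                    if (post_buf())
                            break;          /* -ENOSPC just means the ring is full */
                    num--;
            }
            printf("posted %d buffers\n", filled);  /* prints 8 */
            return 0;
    }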
@@ -870,10 +863,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -909,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -940,9 +931,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +948,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != ce_data) {
+ if (*buf != ce_data) {
ret = -EIO;
goto done;
}
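With ce_data, id and flags dropped from the completion call, the diag path now passes the address of its own ce_data variable as the per-buffer context when posting, and the completion hands that pointer back; *buf == ce_data then confirms the completion belongs to the buffer that was posted. A one-deep user-space model of the round-trip (illustrative names, not the CE API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void *posted_ctx;        /* one-deep stand-in for the recv ring */

    static void rx_post_buf(void *ctx)         { posted_ctx = ctx; }
    static int completed_recv_next(void **ctx) { *ctx = posted_ctx; return 0; }

    int main(void)
    {
            uint32_t ce_data = 0x1000;      /* CE-space address of the host buffer */
            uint32_t *buf;

            rx_post_buf(&ce_data);                  /* &ce_data travels as context */
            completed_recv_next((void **)&buf);     /* context comes back verbatim */

            assert(*buf == ce_data);        /* the check the diag loop performs */
            printf("completion matches posted buffer\n");
            return 0;
    }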
@@ -1026,10 +1018,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
@@ -1078,7 +1068,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1103,9 +1093,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- if (buf != address) {
+ if (*buf != address) {
ret = -EIO;
goto done;
}
@@ -1181,15 +1172,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
- u32 ce_data;
unsigned int nbytes, max_nbytes;
- unsigned int transfer_id;
- unsigned int flags;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
- &ce_data, &nbytes, &transfer_id,
- &flags) == 0) {
+ &nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1205,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
ath10k_pci_rx_post_pipe(pipe_info);
}
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ /* let device gain the buffer again */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
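The recycle step in this new function restores each skb to its freshly-posted state without unmapping it: skb_push() undoes whatever the callback pulled, the tail pointer and length are reset, and a cache sync alone hands the buffer back to the device. The pointer arithmetic, modeled on a toy skb (not the kernel struct):

    #include <assert.h>
    #include <stdio.h>

    /* toy skb: payload spans data..tail inside buf */
    struct toy_skb {
            unsigned char buf[256];
            unsigned char *data, *tail;
    };

    static unsigned int toy_len(const struct toy_skb *s) { return s->tail - s->data; }

    int main(void)
    {
            struct toy_skb skb;
            unsigned int orig_len;

            skb.data = skb.buf;
            skb.tail = skb.buf + 100;       /* 100 bytes of received payload */
            orig_len = toy_len(&skb);

            skb.data += 20;                 /* callback() pulled a 20-byte header */

            skb.data -= orig_len - toy_len(&skb);   /* skb_push(): undo the pull */
            skb.tail = skb.data;                    /* reset tail, trim len to 0 */

            assert(skb.data == skb.buf && toy_len(&skb) == 0);
            printf("buffer restored for re-posting\n");
            return 0;
    }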
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -1274,7 +1318,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
*/
ath10k_ce_per_engine_service(ce_state->ar, 4);
- ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1449,13 +1493,8 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_kill(&ar_pci->intr_tq);
- tasklet_kill(&ar_pci->msi_fw_err);
-
- for (i = 0; i < CE_COUNT; i++)
- tasklet_kill(&ar_pci->pipe_info[i].intr);
del_timer_sync(&ar_pci->rx_post_retry);
}
@@ -1571,10 +1610,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar)
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- synchronize_irq(ar_pci->pdev->irq + i);
+ synchronize_irq(ar_pci->pdev->irq);
}
static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1835,13 +1872,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
- u32 ce_data;
unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
- if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
- &nbytes, &transfer_id, &flags))
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
return;
if (WARN_ON_ONCE(!xfer))
@@ -2546,65 +2580,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
#endif
};
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
- struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
- struct ath10k_pci *ar_pci = pipe->ar_pci;
-
- ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
- struct ath10k *ar = (struct ath10k *)data;
-
- if (!ath10k_pci_has_fw_crashed(ar)) {
- ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
- return;
- }
-
- ath10k_pci_irq_disable(ar);
- ath10k_pci_fw_crashed_clear(ar);
- ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
- if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
- ce_id);
- return IRQ_HANDLED;
- }
-
- /*
- * NOTE: We are able to derive ce_id from irq because we
- * use a one-to-one mapping for CE's 0..5.
- * CE's 6 & 7 do not use interrupts at all.
- *
- * This mapping must be kept in sync with the mapping
- * used by firmware.
- */
- tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_schedule(&ar_pci->msi_fw_err);
- return IRQ_HANDLED;
-}
-
/*
* Top-level interrupt handler for all PCI interrupts from a Target.
* When a block of MSI interrupts is allocated, this top-level handler
@@ -2622,7 +2597,7 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if (ar_pci->num_msi_intrs == 0) {
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
@@ -2649,43 +2624,10 @@ static void ath10k_pci_tasklet(unsigned long data)
ath10k_ce_per_engine_service_any(ar);
/* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
ath10k_pci_enable_legacy_irq(ar);
}
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret, i;
-
- ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
- ath10k_pci_msi_fw_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
- ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
- return ret;
- }
-
- for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
- ret = request_irq(ar_pci->pdev->irq + i,
- ath10k_pci_per_engine_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
- ar_pci->pdev->irq + i, ret);
-
- for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
- free_irq(ar_pci->pdev->irq + i, ar);
-
- free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- return ret;
- }
- }
-
- return 0;
-}
-
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2724,41 +2666,28 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
return ath10k_pci_request_irq_legacy(ar);
- case 1:
+ case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
- return ath10k_pci_request_irq_msix(ar);
+ return -EINVAL;
}
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ free_irq(ar_pci->pdev->irq, ar);
}
void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
- tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long)ar);
-
- for (i = 0; i < CE_COUNT; i++) {
- ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
- (unsigned long)&ar_pci->pipe_info[i]);
- }
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2772,20 +2701,9 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
ath10k_info(ar, "limiting irq mode to: %d\n",
ath10k_pci_irq_mode);
- /* Try MSI-X */
- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
- ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
- ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
- ar_pci->num_msi_intrs);
- if (ret > 0)
- return 0;
-
- /* fall-through */
- }
-
/* Try MSI */
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
- ar_pci->num_msi_intrs = 1;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
return 0;
@@ -2801,7 +2719,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
* synchronization checking. */
- ar_pci->num_msi_intrs = 0;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
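With the MSI-X branch gone, interrupt setup reduces to a two-step ladder: try MSI unless the module parameter forces legacy, otherwise fall back to shared INTx while repeating the CORE_BASE write to dodge the BAR race noted in the comment above. The decision flow, modeled in plain C (the enum values mirror the driver's; the rest is illustrative):

    #include <stdio.h>

    enum irq_mode { IRQ_AUTO, IRQ_LEGACY, IRQ_MSI };

    /* stand-in for pci_enable_msi(): nonzero on failure */
    static int enable_msi(int msi_available) { return msi_available ? 0 : -1; }

    static enum irq_mode init_irq(enum irq_mode requested, int msi_available)
    {
            /* Try MSI unless the user forced legacy interrupts */
            if (requested != IRQ_LEGACY && enable_msi(msi_available) == 0)
                    return IRQ_MSI;

            /* Fall back to shared legacy INTx */
            return IRQ_LEGACY;
    }

    int main(void)
    {
            printf("auto, msi ok  -> %d\n", init_irq(IRQ_AUTO, 1));   /* IRQ_MSI */
            printf("auto, no msi  -> %d\n", init_irq(IRQ_AUTO, 0));   /* IRQ_LEGACY */
            printf("forced legacy -> %d\n", init_irq(IRQ_LEGACY, 1)); /* IRQ_LEGACY */
            return 0;
    }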
@@ -2819,8 +2737,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
ath10k_pci_deinit_irq_legacy(ar);
break;
default:
@@ -2858,7 +2776,7 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
/* Fix potential race by repeating CORE_BASE writes */
ath10k_pci_enable_legacy_irq(ar);
@@ -3136,8 +3054,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
- ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
- ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
ret = ath10k_pci_request_irq(ar);
@@ -3255,7 +3173,6 @@ MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);