 drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 283 ++++++++++++++++---------
 1 file changed, 188 insertions(+), 95 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 37bbd9a07f36..42426e25cac6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -207,10 +207,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
if (!rxq->need_update)
continue;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq->need_update = false;
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -255,7 +255,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
while (rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
@@ -269,16 +269,16 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
/*
* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8.
*/
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -301,7 +301,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
__le32 *bd = (__le32 *)rxq->bd;
/* The overwritten rxb must be a used one */
@@ -320,14 +320,14 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -433,28 +433,28 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
while (1) {
unsigned int offset;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
return;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -466,19 +466,19 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
@@ -514,10 +514,10 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
/* If we were scheduled - there is at least one request */
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* swap out the rba->rbd_empty to a local list */
list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
while (pending) {
int i;
@@ -577,21 +577,21 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
pending);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
/* get more empty RBDs for current pending requests */
list_splice_tail_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
atomic_inc(&rba->req_ready);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* return unused rbds to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
@@ -834,8 +834,11 @@ err:
trans_pcie->base_rb_stts_dma = 0;
}
kfree(trans_pcie->rx_pool);
+ trans_pcie->rx_pool = NULL;
kfree(trans_pcie->global_table);
+ trans_pcie->global_table = NULL;
kfree(trans_pcie->rxq);
+ trans_pcie->rxq = NULL;
return ret;
}
@@ -844,7 +847,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
- unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@@ -862,7 +864,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
/* Stop Rx DMA */
@@ -899,7 +901,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -913,7 +915,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
- unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@@ -934,7 +935,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(trans))
return;
/* Stop Rx DMA */
@@ -992,7 +993,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Enable the relevant rx queues */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
- iwl_trans_release_nic_access(trans, &flags);
+ iwl_trans_release_nic_access(trans);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -1008,10 +1009,76 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+
+static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
- WARN_ON(1);
- return 0;
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, rxq->id);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, 0);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
}
static int _iwl_pcie_rx_init(struct iwl_trans *trans)
@@ -1030,12 +1097,12 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
cancel_work_sync(&rba->rx_alloc);
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
INIT_LIST_HEAD(&rba->rbd_allocated);
INIT_LIST_HEAD(&rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
@@ -1046,7 +1113,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
/*
* Set read write pointer to reflect that we have processed
* and used all buffers, but have not restocked the Rx queue
@@ -1062,11 +1129,27 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rx_init_rxb_lists(rxq);
- if (!rxq->napi.poll)
+ if (!rxq->napi.poll) {
+ int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
+
+ if (trans_pcie->msix_enabled) {
+ poll = iwl_pcie_napi_poll_msix;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX &&
+ i == 0)
+ poll = iwl_pcie_napi_poll_msix_shared;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+ i == 1)
+ poll = iwl_pcie_napi_poll_msix_shared;
+ }
+
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- iwl_pcie_dummy_napi_poll, 64);
+ poll, NAPI_POLL_WEIGHT);
+ napi_enable(&rxq->napi);
+ }
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
/* move the pool to the default queue and allocator ownerships */
@@ -1108,9 +1191,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
- spin_lock(&trans_pcie->rxq->lock);
+ spin_lock_bh(&trans_pcie->rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
- spin_unlock(&trans_pcie->rxq->lock);
+ spin_unlock_bh(&trans_pcie->rxq->lock);
return 0;
}
@@ -1163,8 +1246,10 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
iwl_pcie_free_rxq_dma(trans, rxq);
- if (rxq->napi.poll)
+ if (rxq->napi.poll) {
+ napi_disable(&rxq->napi);
netif_napi_del(&rxq->napi);
+ }
}
kfree(trans_pcie->rx_pool);
kfree(trans_pcie->global_table);
@@ -1417,16 +1502,15 @@ out_err:
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct napi_struct *napi;
struct iwl_rxq *rxq;
- u32 r, i, count = 0;
+ u32 r, i, count = 0, handled = 0;
bool emergency = false;
if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
- return;
+ return budget;
rxq = &trans_pcie->rxq[queue];
@@ -1444,7 +1528,7 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
- while (i != r) {
+ while (i != r && ++handled < budget) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
/* number of RBDs still waiting for page allocation */
@@ -1545,18 +1629,9 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- napi = &rxq->napi;
- if (napi->poll) {
- napi_gro_flush(napi, false);
-
- if (napi->rx_count) {
- netif_receive_skb_list(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
- }
- }
-
iwl_pcie_rxq_restock(trans, rxq);
+
+ return handled;
}
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
@@ -1576,20 +1651,25 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
+ struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
+ if (WARN_ONCE(!rxq, "Got MSI-X interrupt before we have Rx queues"))
+ return IRQ_NONE;
+
lock_map_acquire(&trans->sync_cmd_lockdep_map);
local_bh_disable();
- iwl_pcie_rx_handle(trans, entry->entry);
+ if (napi_schedule_prep(&rxq->napi))
+ __napi_schedule(&rxq->napi);
+ else
+ iwl_pcie_clear_irq(trans, entry->entry);
local_bh_enable();
- iwl_pcie_clear_irq(trans, entry);
-
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
@@ -1600,7 +1680,6 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1612,7 +1691,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMG_PS_CTRL_VAL_RESET_REQ))) {
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
return;
}
@@ -1627,7 +1706,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
@@ -1742,7 +1821,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
&trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans_pcie->wait_command_queue);
+ wake_up(&trans->wait_command_queue);
} else {
clear_bit(STATUS_RFKILL_HW, &trans->status);
if (trans_pcie->opmode_down)
@@ -1757,10 +1836,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
/* dram interrupt table not set yet,
* use legacy interrupt.
@@ -1797,7 +1877,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
*/
if (test_bit(STATUS_INT_ENABLED, &trans->status))
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_NONE;
}
@@ -1808,7 +1888,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* already raised an interrupt.
*/
IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
goto out;
}
@@ -1829,7 +1909,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1949,7 +2029,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rx++;
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
@@ -1974,20 +2057,22 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
inta & ~trans_pcie->inta_mask);
}
- spin_lock(&trans_pcie->irq_lock);
- /* only Re-enable all interrupt if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- /* we are loading the firmware, enable FH_TX interrupt only */
- else if (handled & CSR_INT_BIT_FH_TX)
- iwl_enable_fw_load_int(trans);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(trans);
- /* Re-enable the ALIVE / Rx interrupt if it occurred */
- else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ if (!polling) {
+ spin_lock_bh(&trans_pcie->irq_lock);
+ /* only Re-enable all interrupt if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ /* we are loading the firmware, enable FH_TX interrupt only */
+ else if (handled & CSR_INT_BIT_FH_TX)
+ iwl_enable_fw_load_int(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ }
out:
lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -2049,7 +2134,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
if (!trans_pcie->ict_tbl)
return;
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
_iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -2067,7 +2152,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
@@ -2075,9 +2160,9 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
@@ -2109,10 +2194,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct iwl_trans *trans = trans_pcie->trans;
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
@@ -2120,7 +2206,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
@@ -2146,14 +2232,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
inta_fh & MSIX_FH_INT_CAUSES_Q1) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 1);
+ if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[1].napi);
+ }
local_bh_enable();
}
@@ -2248,7 +2340,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
wake_up(&trans_pcie->fw_reset_waitq);
}
- iwl_pcie_clear_irq(trans, entry);
+ if (!polling)
+ iwl_pcie_clear_irq(trans, entry->entry);
lock_map_release(&trans->sync_cmd_lockdep_map);
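
Taken together, the patch replaces the dummy NAPI poll with real per-queue poll callbacks (legacy, MSI-X, and shared MSI-X variants), has the interrupt handlers merely schedule NAPI instead of draining the RX queue inline, and converts the affected spinlocks to their _bh variants because the RX path now runs from softirq context and shares rxq->lock, rba->lock, and irq_lock with process-context paths. Below is a minimal sketch of that interrupt-to-NAPI handoff, not the literal driver code: it is written as if it lived inside the driver's own rx.c (shared headers and types assumed), reuses the helper names that appear in the diff (iwl_pcie_rx_handle(), _iwl_enable_interrupts()), and simplifies dev_id to point straight at the RX queue.

/*
 * Simplified sketch of the pattern introduced above -- not the literal
 * driver code.  The hard-IRQ handler only schedules NAPI; the poll
 * callback, running in softirq context, drains up to @budget buffers
 * and re-enables interrupts once the queue is empty.
 */
static irqreturn_t iwl_rx_irq_sketch(int irq, void *dev_id)
{
	struct iwl_rxq *rxq = dev_id;	/* simplification for the sketch */

	/* Hand the work to NAPI; the interrupt stays masked while polling. */
	if (napi_schedule_prep(&rxq->napi))
		__napi_schedule(&rxq->napi);

	return IRQ_HANDLED;
}

static int iwl_rx_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
	struct iwl_trans_pcie *trans_pcie =
		container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
	struct iwl_trans *trans = trans_pcie->trans;
	int done;

	/* Process received buffers, bounded by the NAPI budget. */
	done = iwl_pcie_rx_handle(trans, rxq->id, budget);

	if (done < budget) {
		/* Queue drained: unmask interrupts, then stop polling
		 * (same order as the poll callbacks added above). */
		spin_lock(&trans_pcie->irq_lock);
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);

		napi_complete_done(napi, done);
	}

	return done;
}

Because the poll callback above owns the RX queue whenever the softirq runs, any process-context code that takes the same locks must disable bottom halves first; that is exactly what the wholesale spin_lock() -> spin_lock_bh() conversion throughout this file provides.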