Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/rx.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c  |  123
1 file changed, 73 insertions(+), 50 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35afd19..0a4a3c502c3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
-static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
+ u64 val)
{
- iwl_write_prph(trans, ofs, val & 0xffffffff);
- iwl_write_prph(trans, ofs + 4, val >> 32);
+ iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+ iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
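The renamed helper simply splits a 64-bit value into two 32-bit periphery writes, low word first at ofs and high word at ofs + 4. A minimal user-space sketch of the same split; write32(), write_prph_64() and the offset here are illustrative stand-ins, not the iwlwifi accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for iwl_write_prph_no_grab(). */
static void write32(uint64_t ofs, uint32_t val)
{
        printf("write32(0x%llx) = 0x%08x\n",
               (unsigned long long)ofs, (unsigned)val);
}

/* Split a 64-bit value into two 32-bit writes, low half first,
 * mirroring iwl_pcie_write_prph_64_no_grab() above. */
static void write_prph_64(uint64_t ofs, uint64_t val)
{
        write32(ofs, (uint32_t)(val & 0xffffffff));
        write32(ofs + 4, (uint32_t)(val >> 32));
}

int main(void)
{
        write_prph_64(0x1000, 0x1122334455667788ULL);
        return 0;
}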
/*
@@ -208,10 +209,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = round_down(rxq->write, 8);
if (trans->cfg->mq_rx_supported)
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
- rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+ rxq->write_actual);
+ /*
+ * Write to the FH_RSCSR_CHNL0_WPTR register even in MQ, as a W/A for
+ * a hardware shadow-registers bug: writing to RFH_Q_FRBDCB_WIDX alone
+ * will not wake the NIC.
+ */
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
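Note the control flow the hunk ends up with: the legacy FH_RSCSR_CHNL0_WPTR write is now unconditional, since in MQ mode the per-queue WIDX write updates the write pointer but, per the comment, does not wake the NIC. A sketch of that ordering, with reg_write() as a hypothetical stand-in for iwl_write32() and string register names for readability:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register writer standing in for iwl_write32(). */
static void reg_write(const char *reg, uint32_t val)
{
        printf("%s <- 0x%x\n", reg, (unsigned)val);
}

/*
 * Mirror of the workaround in iwl_pcie_rxq_inc_wr_ptr(): update the
 * per-queue write index when MQ RX is supported, then always write
 * the legacy channel-0 pointer so the NIC is actually woken.
 */
static void rxq_inc_wr_ptr(bool mq_rx_supported, uint32_t write_actual)
{
        if (mq_rx_supported)
                reg_write("RFH_Q_FRBDCB_WIDX_TRG", write_actual);
        reg_write("FH_RSCSR_CHNL0_WPTR", write_actual);
}

int main(void)
{
        rxq_inc_wr_ptr(true, 8);
        return 0;
}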
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -694,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
+ unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@@ -711,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* reset and flush pointers */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
+ iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@@ -737,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size|
- (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size |
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
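The pattern this hunk introduces is to take NIC access once around the whole register sequence instead of once per iwl_write_direct32() call, bailing out early if the NIC cannot be woken. A user-space sketch of the guard idiom; grab_nic_access() and release_nic_access() are hypothetical stand-ins modeled on iwl_trans_grab_nic_access() and iwl_trans_release_nic_access():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real helpers take the transport and a
 * flags pointer carrying spinlock/IRQ state. */
static bool grab_nic_access(unsigned long *flags)
{
        *flags = 0;             /* pretend we saved IRQ flags */
        return true;            /* pretend the NIC woke up */
}

static void release_nic_access(unsigned long *flags)
{
        (void)flags;
}

static void rx_hw_init(void)
{
        unsigned long flags;

        /* Bail out early if the NIC cannot be woken, exactly as the
         * patched iwl_pcie_rx_hw_init() does. */
        if (!grab_nic_access(&flags))
                return;

        /* ... many plain register writes, none grabbing access ... */

        release_nic_access(&flags);
}

int main(void)
{
        rx_hw_init();
        return 0;
}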
@@ -757,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
+ unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@@ -774,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
/* disable free and used rx queue operation */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_FRBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].used_bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
- trans_pcie->rxq[i].rb_stts_dma);
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
/* Reset device indice tables */
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
enabled |= BIT(i) | BIT(i + 16);
}
@@ -808,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Drop frames that exceed RB size
* 512 RBDs
*/
- iwl_write_prph(trans, RFH_RXF_DMA_CFG,
- RFH_DMA_EN_ENABLE_VAL |
- rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
- RFH_RXF_DMA_MIN_RB_4_8 |
- RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
- RFH_RXF_DMA_RBDCB_SIZE_512);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+ RFH_DMA_EN_ENABLE_VAL |
+ rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+ RFH_RXF_DMA_MIN_RB_4_8 |
+ RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
* Set RX DMA chunk size to 64B
* Default queue is 0
*/
- iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
- (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
- RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+ iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+ (DEFAULT_RXQ_NUM <<
+ RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP);
/* Enable the relevant rx queues */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
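For reference, the enabled mask accumulated in the per-queue loop above sets one bit in each 16-bit half of the register per queue before it is written to RFH_RXF_RXQ_ACTIVE. A sketch of that accumulation with BIT() expanded by hand; the num_rx_queues value is an example, and the exact meaning of the two halves is not spelled out in this patch:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
        uint32_t enabled = 0;
        int num_rx_queues = 4;  /* example value */

        /* Same accumulation as the loop in iwl_pcie_rx_mq_hw_init():
         * one bit per queue in each half of the register. */
        for (int i = 0; i < num_rx_queues; i++)
                enabled |= BIT(i) | BIT(i + 16);

        /* prints 0x000f000f for four queues */
        printf("RFH_RXF_RXQ_ACTIVE <- 0x%08x\n", (unsigned)enabled);
        return 0;
}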
@@ -908,6 +929,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
+ BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+ ARRAY_SIZE(trans_pcie->rx_pool));
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
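The BUILD_BUG_ON() added here turns the size coupling between global_table and rx_pool into a compile-time failure rather than a runtime surprise. Outside the kernel the same check can be written with C11 static_assert; the arrays and their length below are hypothetical stand-ins for the two trans_pcie members:

#include <assert.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical arrays standing in for trans_pcie->global_table and
 * trans_pcie->rx_pool in the patch. */
static void *global_table[512];
static int rx_pool[512];

/* Compile-time check mirroring the BUILD_BUG_ON() added above:
 * the build breaks if the two array lengths ever diverge. */
static_assert(ARRAY_SIZE(global_table) == ARRAY_SIZE(rx_pool),
              "global_table and rx_pool must have the same length");

int main(void)
{
        return 0;
}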
@@ -1292,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
* write-1-clear (W1C) register, meaning that it is cleared
* by writing 1 to the bit.
*/
- iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
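A W1C register clears exactly the bits the CPU writes as 1 and leaves the rest untouched, so no read-modify-write is needed and concurrent writers cannot clobber each other's bits. A small simulation of that semantic, with a plain variable standing in for CSR_MSIX_AUTOMASK_ST_AD:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Simulated write-1-clear register: writing 1 to a bit clears it,
 * writing 0 leaves it untouched (unlike a normal register). */
static void w1c_write(uint32_t *reg, uint32_t val)
{
        *reg &= ~val;
}

int main(void)
{
        uint32_t automask = BIT(0) | BIT(3) | BIT(5);

        /* Clear only entry 3, as iwl_pcie_clear_irq() does with
         * BIT(entry->entry); bits 0 and 5 survive. */
        w1c_write(&automask, BIT(3));
        printf("automask = 0x%x\n", (unsigned)automask);  /* 0x21 */
        return 0;
}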
/*
@@ -1805,19 +1828,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
- struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock(&trans_pcie->irq_lock);
- inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
- inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
* Clear the cause registers to avoid handling the same cause twice.
*/
- iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
- iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock(&trans_pcie->irq_lock);
if (unlikely(!(inta_fh | inta_hw))) {
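The MSIX handler above samples both cause registers under irq_lock and immediately writes the raw values back, acknowledging exactly the causes it just read (the cause registers are presumably write-to-clear as well). A sketch of that sample-then-ack sequence with simulated registers standing in for CSR_MSIX_FH_INT_CAUSES_AD and CSR_MSIX_HW_INT_CAUSES_AD:

#include <stdint.h>
#include <stdio.h>

/* Simulated write-to-clear cause registers. */
static uint32_t fh_causes = 0x5;
static uint32_t hw_causes = 0x2;

static uint32_t read_causes(const uint32_t *reg)
{
        return *reg;
}

static void ack_causes(uint32_t *reg, uint32_t val)
{
        *reg &= ~val;   /* write-1-clear semantics */
}

int main(void)
{
        /* Sample both registers, then ack exactly what was sampled,
         * as iwl_pcie_irq_msix_handler() does under irq_lock; a cause
         * raised after the read is preserved for the next interrupt. */
        uint32_t inta_fh = read_causes(&fh_causes);
        uint32_t inta_hw = read_causes(&hw_causes);

        ack_causes(&fh_causes, inta_fh);
        ack_causes(&hw_causes, inta_hw);

        printf("handled fh=0x%x hw=0x%x\n",
               (unsigned)inta_fh, (unsigned)inta_hw);
        return 0;
}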