Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/trans.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 59 +++++++++++++----------
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index bee6b4574226..f252680f18e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -449,11 +449,23 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
+ CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
+ 100);
+ } else {
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ }
+
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
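Both branches of this hunk follow the same request/acknowledge idiom: set a request bit, then poll an acknowledge bit until the hardware reports the bus master is quiesced. On Bz and later the pair is BUS_MASTER_DISABLE_REQ/_STATUS in CSR_GP_CNTRL; earlier families set STOP_MASTER and poll MASTER_DISABLED in CSR_RESET. A minimal sketch of that idiom, assuming iwl_poll_bit() spins until (read(reg) & mask) == bits and returns a negative errno on timeout; poll_ack_bit() is a hypothetical stand-in, not a driver function:

	/* Hypothetical illustration of the request/ack pattern above. */
	static int poll_ack_bit(struct iwl_trans *trans, u32 reg,
				u32 req, u32 ack)
	{
		int ret;

		iwl_set_bit(trans, reg, req);		/* raise the request */
		ret = iwl_poll_bit(trans, reg, ack, ack,
				   100);		/* timeout in usec */
		return ret < 0 ? ret : 0;
	}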
@@ -1866,6 +1878,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_free_rbs_pool(trans);
+
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
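Freeing the RX buffer pool at the top of the configure path matters because the opmode can reconfigure the transport with a different RX buffer size; buffers allocated for the old size must not survive into the new configuration. A hedged lifecycle sketch, where only iwl_pcie_free_rbs_pool() is a real driver call and the rest is illustrative:

	/* Illustrative sketch; reconfigure_sketch() is hypothetical. */
	static void reconfigure_sketch(struct iwl_trans *trans,
				       const struct iwl_trans_config *cfg)
	{
		/* drop buffers sized for the previous configuration */
		iwl_pcie_free_rbs_pool(trans);

		/* record the (possibly different) new RB size here;
		 * buffers are reallocated at that size when RX restarts */
	}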
@@ -1992,15 +2007,24 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
+ u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
+ u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
spin_lock(&trans_pcie->reg_lock);
if (trans_pcie->cmd_hold_nic_awake)
goto out;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
+ mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ }
+
/* this bit wakes up the NIC */
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2024,10 +2048,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
* 5000 series and later (including 1000 series) have non-volatile SRAM,
* and do not save/restore SRAM when power cycling.
*/
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
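These two hunks fold the family-specific register bits into three locals so a single iwl_poll_bit() call serves both generations. For reference, the two parameter sets, expressed as a table-in-code; the struct and helper are illustrative, not part of the driver, but the CSR values come straight from the hunks above:

	struct nic_access_bits {
		u32 write;	/* bit set in CSR_GP_CNTRL to request access */
		u32 mask;	/* bits that must settle before access is granted */
		u32 poll;	/* value those bits must read back as */
	};

	static struct nic_access_bits
	nic_access_bits_for(const struct iwl_trans *trans)
	{
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			return (struct nic_access_bits) {
				.write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ,
				.mask  = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS,
				.poll  = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS,
			};
		return (struct nic_access_bits) {
			.write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ,
			.mask  = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP,
			.poll  = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		};
	}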
@@ -2947,8 +2968,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
struct iwl_fw_error_dump_rb *rb;
- dma_unmap_page(trans->dev, rxb->page_dma, max_len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
+ max_len, DMA_FROM_DEVICE);
rb_len += sizeof(**data) + sizeof(*rb) + max_len;
@@ -2957,10 +2978,6 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb = (void *)(*data)->data;
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
- /* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page,
- rxb->offset, max_len,
- DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
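The two dump_rbs hunks replace an unmap/remap cycle with dma_sync_single_for_cpu(): the streaming mapping stays alive across the dump, so the re-map (whose failure the old code never checked) disappears entirely. A hedged sketch of the ownership handoff; dump_one_rb() is hypothetical, and whether a matching for_device sync is needed here depends on how the buffer is restocked afterwards:

	/* Hypothetical sketch of the CPU-ownership handoff in the dump. */
	static void dump_one_rb(struct device *dev,
				struct iwl_rx_mem_buffer *rxb,
				void *dst, int max_len)
	{
		/* give the CPU a coherent view of the DMA buffer */
		dma_sync_single_for_cpu(dev, rxb->page_dma, max_len,
					DMA_FROM_DEVICE);

		memcpy(dst, page_address(rxb->page), max_len);

		/* hand ownership back before the device DMAs into it again */
		dma_sync_single_for_device(dev, rxb->page_dma, max_len,
					   DMA_FROM_DEVICE);
	}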
@@ -3489,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
addr_size = trans->txqs.tfd.addr_size;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
- if (!ret)
- ret = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(addr_size));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!ret)
- ret = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
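The last hunk swaps the long-deprecated PCI wrappers for the generic DMA API: pci_set_dma_mask(pdev, m) is a shim over dma_set_mask(&pdev->dev, m), and pci_set_consistent_dma_mask() over dma_set_coherent_mask(). dma_set_mask_and_coherent() sets both masks in one call, which is why four calls collapse into two. A sketch of the resulting idiom, with addr_size taken from the transport as in the function above:

	/* Modern DMA-mask negotiation with the usual 32-bit fallback. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret)	/* device-preferred width unavailable, try 32-bit */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(&pdev->dev, "No suitable DMA available\n");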