Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c |   2
 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c      |  56
 drivers/net/wireless/intel/iwlwifi/pcie/drv.c            | 164
 drivers/net/wireless/intel/iwlwifi/pcie/internal.h       |  40
 drivers/net/wireless/intel/iwlwifi/pcie/rx.c             | 112
 drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c     |   5
 drivers/net/wireless/intel/iwlwifi/pcie/trans.c          |  98
 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c        | 224
 drivers/net/wireless/intel/iwlwifi/pcie/tx.c             |  80
 9 files changed, 567 insertions(+), 214 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index a4e09a5b1816..01f248ba8fec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+ cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d38cefbb779e..acd01d86f101 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -57,6 +57,42 @@
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -193,11 +232,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
- (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS) |
- (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+ control_flags |=
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
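
The allocation helper added above retries until it gets coherent DMA memory that does not straddle a 2^32 (4 GiB) physical boundary. The key detail is the ordering: the replacement block is allocated *before* the offending one is freed, so the allocator cannot hand back the same crossing region on the retry. A minimal userspace sketch of the same pattern, with fake_alloc()/fake_free() as hypothetical stand-ins (they are not kernel APIs):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Same test as iwl_pcie_crosses_4g_boundary(): true if [phys, phys+len)
 * does not fit within a single 4 GiB aligned window. */
static bool crosses_4g(uint64_t phys, size_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

/* Hypothetical allocator stand-ins, NOT kernel APIs. */
void *fake_alloc(size_t size, uint64_t *phys);
void fake_free(void *virt, uint64_t phys, size_t size);

static void *alloc_not_crossing_4g(size_t size, uint64_t *phys, int depth)
{
	void *result;

	if (depth > 2)		/* give up after a few attempts */
		return NULL;

	result = fake_alloc(size, phys);
	if (!result)
		return NULL;

	if (crosses_4g(*phys, size)) {
		void *old = result;
		uint64_t oldphys = *phys;

		/* Allocate the replacement while the bad block is still
		 * held, so the allocator can't return the same region;
		 * only then free the bad block. */
		result = alloc_not_crossing_4g(size, phys, depth + 1);
		fake_free(old, oldphys, size);
	}
	return result;
}
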
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b0b7eca1754e..f441b20e1642 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -565,14 +565,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
+
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -598,21 +592,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -910,7 +895,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
@@ -987,28 +971,77 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name }
+
+static const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLMVM)
+ IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+
+ IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+#endif /* CONFIG_IWLMVM */
+};
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans =
+ (struct iwl_cfg_trans_params *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
+ struct iwl_trans_pcie *trans_pcie;
unsigned long flags;
- int ret;
+ int i, ret;
+ /*
+ * This is needed for backwards compatibility with the old
+ * tables, so we don't need to change all the config structs
+ * at the same time. The cfg is used to compare with the old
+ * full cfg structs.
+ */
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
+ /* make sure trans is the first element in iwl_cfg */
+ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = &cfg->trans;
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- if (WARN_ONCE(!iwl_trans->trans_cfg->csr,
- "CSR addresses aren't configured\n")) {
- ret = -EINVAL;
- goto out_free_trans;
+ /* the trans_cfg should never change, so set it now */
+ iwl_trans->trans_cfg = trans;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if ((dev_info->device == IWL_CFG_ANY ||
+ dev_info->device == pdev->device) &&
+ (dev_info->subdevice == IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device)) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
+ goto found;
+ }
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1027,22 +1060,22 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- cfg = &iwlax411_2ax_cfg_so_gf4_a0;
+ iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
@@ -1050,13 +1083,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- cfg = &iwl_ax101_cfg_qu_hr;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwl22000_2ax_cfg_jf;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
@@ -1073,19 +1110,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- /*
- * b step fw is the same for physical card and fpga
- */
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
- } else {
- /*
- * a step no FPGA
- */
- cfg = &iwl22000_2ac_cfg_hr;
- }
+ CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
+ SILICON_A_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
@@ -1096,17 +1125,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
@@ -1126,8 +1159,27 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
- /* now set the real cfg we decided to use */
- iwl_trans->cfg = cfg;
+ /*
+ * If we didn't set the cfg yet, assume the trans is actually
+ * a full cfg from the old tables.
+ */
+ if (!iwl_trans->cfg)
+ iwl_trans->cfg = cfg;
+
+found:
+ /* if we don't have a name yet, copy name from the old cfg */
+ if (!iwl_trans->name)
+ iwl_trans->name = iwl_trans->cfg->name;
+
+ if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
+ trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ } else {
+ trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ }
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 &&
iwl_trans_grab_nic_access(iwl_trans, &flags)) {
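
With the table above, the probe path matches (device, subdevice) against iwl_dev_info_table before falling back to the full cfg embedded in the PCI ID table. Restated as a hypothetical standalone helper (demo_find_dev_info() is not a driver function, just the loop from iwl_pci_probe() lifted out):

static const struct iwl_dev_info *
demo_find_dev_info(u16 device, u16 subsystem_device)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
		const struct iwl_dev_info *di = &iwl_dev_info_table[i];

		/* IWL_CFG_ANY is a wildcard for either field */
		if ((di->device == IWL_CFG_ANY ||
		     di->device == device) &&
		    (di->subdevice == IWL_CFG_ANY ||
		     di->subdevice == subsystem_device))
			return di;
	}
	return NULL;
}

This is what lets the long runs of 0x2526 subdevice entries collapse: specific subdevices now match rows of iwl_dev_info_table, while the remaining catch-all {0x2526, PCI_ANY_ID, iwl9000_trans_cfg} PCI entry only supplies the transport parameters.
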
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a091690f6c79..72f144c3a46e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @offset: byte offset within the page at which this buffer
+ * starts (used when multiple RBs fit into one page)
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 offset;
};
/**
@@ -305,7 +308,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@@ -491,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
@@ -510,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: mask of the low address bits that must be zero
+ * in an RB's DMA address; DMA_BIT_MASK(12) by default, or
+ * DMA_BIT_MASK(11) on AX210 and later devices
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
@@ -573,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
+ u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -580,6 +590,13 @@ struct iwl_trans_pcie {
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
/*protect hw register */
spinlock_t reg_lock;
@@ -672,6 +689,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
@@ -688,7 +715,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1109,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
@@ -1106,7 +1134,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
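
The semantics of the new iwl_pcie_crosses_4g_boundary() helper are easy to pin down with a quick userspace check (crosses() below restates the inline). Note that a buffer ending exactly at a multiple of 2^32 also counts as crossing, because phys + len is one past the last byte - a deliberately conservative test:

#include <assert.h>
#include <stdint.h>

static int crosses(uint64_t phys, uint16_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

int main(void)
{
	assert(!crosses(0xfffff000ULL, 0xfff));   /* last byte 0xfffffffe */
	assert(crosses(0xfffff000ULL, 0x1000));   /* phys + len == 2^32 */
	assert(crosses(0xffffffffULL, 1));
	assert(!crosses(0x100000000ULL, 0x1000)); /* entirely above 4 GiB */
	return 0;
}
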
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 452da44a21e0..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true;
return;
}
@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
/*
@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list);
list_del(&rxb->list);
rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page;
gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n");
return NULL;
}
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
return page;
}
@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page;
while (1) {
+ unsigned int offset;
+
spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock);
@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
spin_unlock(&rxq->lock);
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page);
rxb->page = page;
+ rxb->offset = offset;
/* Get physical address of the RB */
rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < RX_POOL_SIZE; i++) {
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL;
@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
spin_lock_init(&rxq->lock);
if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ rxq->queue_size = trans->cfg->num_rbds;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -ENOMEM;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&rba->lock);
@@ -845,6 +890,8 @@ err:
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
return ret;
@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
/* move the pool to the default queue and allocator ownerships */
queue_size = trans->trans_cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
if (WARN_ON(!rxb))
@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim;
int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */
if (rxb->page != NULL) {
rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ vid = le16_to_cpu(rxq->cd[i].rbid);
else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
rxb = trans_pcie->global_table[vid - 1];
@@ -1529,13 +1579,13 @@ out:
napi = &rxq->napi;
if (napi->poll) {
+ napi_gro_flush(napi, false);
+
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
INIT_LIST_HEAD(&napi->rx_list);
napi->rx_count = 0;
}
-
- napi_gro_flush(napi, false);
}
iwl_pcie_rxq_restock(trans, rxq);
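
The RX changes above let several receive buffers share one higher-order page: iwl_pcie_rx_alloc_page() now returns a (page, offset) pair and keeps one partially used page cached under alloc_page_lock. A condensed sketch of the hand-out path and its reference counting (demo_* names are hypothetical, not driver symbols):

struct demo_rb_pages {
	spinlock_t lock;
	struct page *partial;	/* partially used page, or NULL */
	u32 used;		/* bytes of *partial already handed out */
};

/* Hand out one RB-sized chunk from the cached page, if any. The caller
 * receives one page reference and the chunk's byte offset in *offset. */
static struct page *demo_take_chunk(struct demo_rb_pages *pa, u32 rbsize,
				    u32 pagesize, u32 *offset)
{
	struct page *page = NULL;

	spin_lock_bh(&pa->lock);
	if (pa->partial) {
		page = pa->partial;
		*offset = pa->used;
		pa->used += rbsize;
		if (pa->used >= pagesize)
			/* last chunk: the cached reference transfers
			 * to the caller, stop using this page */
			pa->partial = NULL;
		else
			/* one extra reference per chunk handed out */
			get_page(page);
	}
	spin_unlock_bh(&pa->lock);
	return page;
}

On a fresh allocation, the driver caches the page (taking an extra get_page()) only when at least two RBs fit, i.e. 2 * rbsize <= allocsize; releases then go through the normal page refcount, so the per-RB dma_unmap_page()/__free_pages() calls work unchanged.
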
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0d8b2a8ffa5d..19a2c72081ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -132,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -175,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a0677131634d..38d8fe21690a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -79,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -183,8 +184,7 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
@@ -301,18 +301,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -487,8 +482,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -510,12 +504,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_master_dis),
- BIT(trans->trans_cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -564,8 +557,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -1270,7 +1262,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1494,9 +1486,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (reset) {
/*
@@ -1561,7 +1552,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
@@ -1583,7 +1574,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1945,6 +1936,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
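
The supported_dma_mask set just above is the counterpart of the partial-page RBs in rx.c: DMA_BIT_MASK(12) is 0xfff, so the WARN_ON(rxb->page_dma & ...) check asserts 4 KiB alignment of each RB's DMA address, while DMA_BIT_MASK(11) = 0x7ff on AX210 and later only requires 2 KiB alignment, matching 2 KiB RB offsets within a shared page. A minimal illustration (addresses and offsets assumed for the example):

#include <assert.h>
#include <stdint.h>

#define DEMO_DMA_BIT_MASK(n)	((1ULL << (n)) - 1)

int main(void)
{
	uint64_t page_dma = 0x12340000ULL;	/* page-aligned mapping */

	/* a 2 KiB offset within the page passes the AX210+ check ... */
	assert(((page_dma + 0x800) & DEMO_DMA_BIT_MASK(11)) == 0);
	/* ... but would trip the stricter pre-AX210 4 KiB check */
	assert(((page_dma + 0x800) & DEMO_DMA_BIT_MASK(12)) != 0);
	return 0;
}
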
@@ -2054,7 +2050,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2079,8 +2075,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2162,7 +2158,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2963,7 +2959,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
@@ -2989,9 +2985,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
- max_len,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page,
+ rxb->offset, max_len,
+ DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg_trans->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie);
-
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3482,6 +3493,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
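
The txcmd_size/txcmd_align values computed above are passed into iwl_trans_alloc(), whose changed signature suggests the core now allocates TX commands from a slab cache sized per transport rather than as one worst-case struct. That side of the change is outside this diff; under that assumption, the setup would look roughly like:

/* Presumed setup inside iwl_trans_alloc() (not shown in this diff);
 * the kmem_cache_create() usage here is illustrative only. */
trans->dev_cmd_pool = kmem_cache_create("iwl_cmd_pool",
					txcmd_size, txcmd_align,
					SLAB_HWCACHE_ALIGN, NULL);

The gen2 WARN_ON(txcmd_size >= txcmd_align) then guarantees every buffer from the cache fits inside a single txcmd_align-sized slot; since page boundaries (and 2^32 boundaries) are multiples of 64 and 128, no TX command can end at or cross one - exactly the condition the new workaround code in tx-gen2.c has to guard against.
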
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8ca0250de99e..86fc00167817 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+ * Only WARN here so we know about the issue; this also messes up
+ * our unmap path, because not every caller currently checks the
+ * error returned from this function - it can only fail when there
+ * is no more space, so callers that know there is enough space
+ * don't always check the return value.
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB crosses or ends exactly at a
+ * 2^32 address boundary, then the next TB may be
+ * accessed with the wrong address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
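/*
 * Editor's note (a summary of iwl_pcie_gen2_set_tb_with_wa() above,
 * not part of the patch): the exit paths are shared deliberately.
 *   - no 2^32 crossing: set the TB and jump straight to trace:,
 *     the caller's mapping is used as-is, nothing to unmap;
 *   - crossing, copy succeeded: map the copy, set the TB, ret = 0,
 *     then fall through unmap:, which releases the ORIGINAL mapping
 *     (oldphys) that the copy replaced;
 *   - any failure: ret < 0 and unmap: releases whichever mapping is
 *     still live; 'meta' selects dma_unmap_page() vs.
 *     dma_unmap_single(), matching how the buffer was mapped.
 */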
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
/*
* Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
skb_walk_frags(skb, frag) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, frag->data,
skb_headlen(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
- tb_phys, skb_headlen(frag));
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
goto out_err;
}
@@ -538,7 +670,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@@ -587,6 +719,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx;
void *tfd;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -603,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1101,9 +1237,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq;
int i;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txq[txq_id];
+
if (WARN_ON(!txq))
return;
@@ -1258,6 +1400,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f21f16ab2a97..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
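
The loop above frees a chain rather than a single page, because get_page_hdr() and get_workaround_page() (tx-gen2.c) now link pages through a pointer stored in the last sizeof(void *) bytes of each page, all anchored in one skb->cb slot. Layout sketch:

/*
 * skb->cb page_ptr --> page C --> page B --> page A --> NULL
 *                      (the newest page is always at the head)
 *
 * each page: [ header / workaround data ............ | next ]
 *                         last sizeof(void *) bytes ---^
 *
 * Keeping the pointer in the tail also keeps that tail out of any
 * DMA mapping, which is what lets buffers carved from these pages
 * avoid the 2^32-boundary hardware bug (see the comment added to
 * get_page_hdr() below).
 */
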
@@ -646,7 +652,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1255,16 +1261,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);