author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 16:27:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 16:27:18 -0700
commit     cb8e59cc87201af93dfbb6c3dccc8fcad72a09c2 (patch)
tree       a334db9022f89654b777bbce8c4c6632e65b9031 /drivers/net/wireless/intel/iwlwifi/pcie
parent     Merge branch 'uaccess.comedi' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs (diff)
parent     selftests: net: ip_defrag: ignore EPERM (diff)
download   wireguard-linux-cb8e59cc87201af93dfbb6c3dccc8fcad72a09c2.tar.xz
           wireguard-linux-cb8e59cc87201af93dfbb6c3dccc8fcad72a09c2.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:

 1) Allow setting bluetooth L2CAP modes via socket option, from Luiz Augusto von Dentz.

 2) Add GSO partial support to igc, from Sasha Neftin.

 3) Several cleanups and improvements to r8169 from Heiner Kallweit.

 4) Add IF_OPER_TESTING link state and use it when ethtool triggers a device self-test. From Andrew Lunn.

 5) Start moving away from custom driver versions, use the globally defined kernel version instead, from Leon Romanovsky.

 6) Support GRO via gro_cells in DSA layer, from Alexander Lobakin.

 7) Allow hard IRQ deferral during NAPI, from Eric Dumazet.

 8) Add sriov and vf support to hinic, from Luo bin.

 9) Support Media Redundancy Protocol (MRP) in the bridging code, from Horatiu Vultur.

10) Support netmap in the nft_nat code, from Pablo Neira Ayuso.

11) Allow UDPv6 encapsulation of ESP in the ipsec code, from Sabrina Dubroca. Also add ipv6 support for espintcp.

12) Lots of ReST conversions of the networking documentation, from Mauro Carvalho Chehab.

13) Support configuration of ethtool rxnfc flows in bcmgenet driver, from Doug Berger.

14) Allow dumping cgroup id and filtering by it in inet_diag code, from Dmitry Yakunin.

15) Add infrastructure to export netlink attribute policies to userspace, from Johannes Berg.

16) Several optimizations to sch_fq scheduler, from Eric Dumazet.

17) Fall back to the default qdisc if qdisc init fails, because otherwise a packet scheduler init failure will make a device inoperative. From Jesper Dangaard Brouer.

18) Several RISCV bpf jit optimizations, from Luke Nelson.

19) Correct the return type of the ->ndo_start_xmit() method in several drivers, it's netdev_tx_t but many drivers were using 'int'. From Yunjian Wang.

20) Add an ethtool interface for PHY master/slave config, from Oleksij Rempel.

21) Add BPF iterators, from Yonghong Song.

22) Add cable test infrastructure, including ethtool interfaces, from Andrew Lunn. Marvell PHY driver is the first to support this facility.

23) Remove zero-length arrays all over, from Gustavo A. R. Silva.

24) Calculate and maintain an explicit frame size in XDP, from Jesper Dangaard Brouer.

25) Add CAP_BPF, from Alexei Starovoitov.

26) Support terse dumps in the packet scheduler, from Vlad Buslov.

27) Support XDP_TX bulking in dpaa2 driver, from Ioana Ciornei.

28) Add devm_register_netdev(), from Bartosz Golaszewski.

29) Minimize qdisc resets, from Cong Wang.

30) Get rid of kernel_getsockopt and kernel_setsockopt in order to eliminate set_fs/get_fs calls. From Christoph Hellwig.
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2517 commits)
  selftests: net: ip_defrag: ignore EPERM
  net_failover: fixed rollback in net_failover_open()
  Revert "tipc: Fix potential tipc_aead refcnt leak in tipc_crypto_rcv"
  Revert "tipc: Fix potential tipc_node refcnt leak in tipc_rcv"
  vmxnet3: allow rx flow hash ops only when rss is enabled
  hinic: add set_channels ethtool_ops support
  selftests/bpf: Add a default $(CXX) value
  tools/bpf: Don't use $(COMPILE.c)
  bpf, selftests: Use bpf_probe_read_kernel
  s390/bpf: Use bcr 0,%0 as tail call nop filler
  s390/bpf: Maintain 8-byte stack alignment
  selftests/bpf: Fix verifier test
  selftests/bpf: Fix sample_cnt shared between two threads
  bpf, selftests: Adapt cls_redirect to call csum_level helper
  bpf: Add csum_level helper for fixing up csum levels
  bpf: Fix up bpf_skb_adjust_room helper's skb csum setting
  sfc: add missing annotation for efx_ef10_try_update_nic_stats_vf()
  crypto/chtls: IPv6 support for inline TLS
  Crypto/chcr: Fixes a coccinile check error
  Crypto/chcr: Fixes compilations warnings
  ...
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c  |  57
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c       |  21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c             | 139
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h        | 142
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c              |  38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c      |  11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c           | 195
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c         | 111
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c              | 132
9 files changed, 399 insertions(+), 447 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 9d5b1e51b50d..1ab136600415 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,32 +84,35 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_SRAM_PATH) {
+ switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
+ case IWL_FW_INI_LOCATION_SRAM_PATH:
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
-
IWL_DEBUG_FW(trans,
- "WRT: Applying SMEM buffer destination\n");
-
- goto out;
- }
-
- if (le32_to_cpu(fw_mon_cfg->buf_location) ==
- IWL_FW_INI_LOCATION_DRAM_PATH &&
- trans->dbg.fw_mon_ini[alloc_id].num_frags) {
- struct iwl_dram_data *frag =
- &trans->dbg.fw_mon_ini[alloc_id].frags[0];
-
- dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ "WRT: Applying SMEM buffer destination\n");
+ break;
+ case IWL_FW_INI_LOCATION_NPK_PATH:
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
IWL_DEBUG_FW(trans,
- "WRT: Applying DRAM destination (alloc_id=%u)\n",
- alloc_id);
+ "WRT: Applying NPK buffer destination\n");
+ break;
- dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
- dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ case IWL_FW_INI_LOCATION_DRAM_PATH:
+ if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
+ alloc_id,
+ trans->dbg.fw_mon_ini[alloc_id].num_frags);
+ }
+ break;
+ default:
+ IWL_ERR(trans, "WRT: Invalid buffer destination\n");
}
-
out:
if (dbg_flags)
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
@@ -135,9 +138,17 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
case IWL_AMSDU_2K:
break;
case IWL_AMSDU_4K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ break;
case IWL_AMSDU_8K:
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
+ break;
case IWL_AMSDU_12K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
+ /* if firmware supports the ext size, tell it */
+ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K;
break;
}
@@ -210,7 +221,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index acd01d86f101..23abfbd096b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,21 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
+ &dram->physical);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -248,7 +263,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 29971c25dba4..65d65c6baf4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,11 +27,10 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -524,8 +522,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Qu devices */
{IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
{IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
@@ -539,12 +539,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@@ -585,94 +590,72 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
-/* Qu with Hr */
- IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ /* QnJ with Hr */
+ IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+
+ /* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
- IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+
+ IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
@@ -771,7 +754,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9260_2ac_cfg, iwl9260_name),
- /* Qu with Jf */
+/* Qu with Jf */
/* Qu B step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
@@ -947,6 +930,29 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+/* Qu with Hr */
+ /* Qu B step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_b0_hr1_b0, iwl_ax101_name),
+
+ /* Qu C step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_qu_c0_hr1_b0, iwl_ax101_name),
+
+ /* QuZ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_quz_a0_hr1_b0, iwl_ax101_name),
+
#endif /* CONFIG_IWLMVM */
};
@@ -1044,29 +1050,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
- } else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
- (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
- IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
- return -EINVAL;
- } else {
- IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n",
- CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
- return -EINVAL;
- }
}
/*
@@ -1076,9 +1059,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* rest must be removed once we convert Qu with Hr as well.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
@@ -1088,9 +1069,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
@@ -1146,12 +1125,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* register transport layer debugfs here */
iwl_trans_pcie_dbgfs_register(iwl_trans);
- /* The PCI device starts with a reference taken and we are
- * supposed to release it here. But to simplify the
- * interaction with the opmode, we don't do it now, but let
- * the opmode release it when it's ready.
- */
-
return 0;
out_free_trans:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 595e6873d56e..55808ba10d27 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -189,6 +189,8 @@ struct iwl_rx_completion_desc {
* @rb_stts_dma: bus address of receive buffer status
* @lock:
* @queue: actual rx queue. Not used for multi-rx queue.
+ * @next_rb_is_fragment: indicates that the previous RB that we handled set
+ * the fragmented flag, so the next one is still another fragment
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
@@ -214,7 +216,7 @@ struct iwl_rxq {
u32 queue_size;
struct list_head rx_free;
struct list_head rx_used;
- bool need_update;
+ bool need_update, next_rb_is_fragment;
void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -244,12 +246,6 @@ struct iwl_rb_allocator {
struct work_struct rx_alloc;
};
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
@@ -288,107 +284,6 @@ static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- u32 flags;
- u32 tbs;
-};
-
-/*
- * The FH will write back to the first TB only, so we need to copy some data
- * into the buffer regardless of whether it should be mapped or not.
- * This indicates how big the first TB must be to include the scratch buffer
- * and the assigned PN.
- * Since PN location is 8 bytes at offset 12, it's 20 now.
- * If we make it bigger then allocations will be bigger and copy slower, so
- * that's probably not useful.
- */
-#define IWL_FIRST_TB_SIZE 20
-#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
-
-struct iwl_pcie_txq_entry {
- void *cmd;
- struct sk_buff *skb;
- /* buffer to free after command completes */
- const void *free_buf;
- struct iwl_cmd_meta meta;
-};
-
-struct iwl_pcie_first_tb_buf {
- u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
-};
-
-/**
- * struct iwl_txq - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: transmit frame descriptors (DMA memory)
- * @first_tb_bufs: start of command headers, including scratch buffers, for
- * the writeback -- this is DMA memory and an array holding one buffer
- * for each command on the queue
- * @first_tb_dma: DMA address for the first_tb_bufs start
- * @entries: transmit entries (driver state)
- * @lock: queue lock
- * @stuck_timer: timer that fires if queue gets stuck
- * @trans_pcie: pointer back to transport (for timer)
- * @need_update: indicates need to update read/write index
- * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
- * @wd_timeout: queue watchdog timeout (jiffies) - per queue
- * @frozen: tx stuck queue timer is frozen
- * @frozen_expiry_remainder: remember how long until the timer fires
- * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
- * @write_ptr: 1-st empty entry (index) host_w
- * @read_ptr: last used entry (index) host_r
- * @dma_addr: physical addr for BD's
- * @n_window: safe queue window
- * @id: queue id
- * @low_mark: low watermark, resume queue if free space more than this
- * @high_mark: high watermark, stop queue if free space less than this
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_txq {
- void *tfds;
- struct iwl_pcie_first_tb_buf *first_tb_bufs;
- dma_addr_t first_tb_dma;
- struct iwl_pcie_txq_entry *entries;
- spinlock_t lock;
- unsigned long frozen_expiry_remainder;
- struct timer_list stuck_timer;
- struct iwl_trans_pcie *trans_pcie;
- bool need_update;
- bool frozen;
- bool ampdu;
- int block;
- unsigned long wd_timeout;
- struct sk_buff_head overflow_q;
- struct iwl_dma_ptr bc_tbl;
-
- int write_ptr;
- int read_ptr;
- dma_addr_t dma_addr;
- int n_window;
- u32 id;
- int low_mark;
- int high_mark;
-
- bool overflow_tx;
-};
-
static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
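[The iwl_txq documentation removed above describes a 32-entry software window overlaid on the 256-entry hardware command queue. As a minimal sketch, not the driver's code and with an illustrative demo_ name, the mapping reduces to masking the hardware pointer with the window size, assuming n_window is a power of two:]

/*
 * Sketch of the SW-window-over-HW-queue mapping described in the removed
 * comment: the hardware pointer runs over all 256 entries, while software
 * state is indexed modulo n_window (32 for the command queue).
 */
static inline int demo_sw_index(int hw_ptr, int n_window)
{
	return hw_ptr & (n_window - 1);
}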
@@ -556,11 +451,9 @@ struct iwl_trans_pcie {
u32 scd_base_addr;
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
+ struct dma_pool *bc_pool;
struct iwl_txq *txq_memory;
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */
struct pci_dev *pci_dev;
@@ -574,10 +467,7 @@ struct iwl_trans_pcie {
u8 page_offs, dev_cmd_offs;
- u8 cmd_queue;
u8 def_rx_queue;
- u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
@@ -792,22 +682,6 @@ static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
return i;
}
-static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -996,9 +870,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
@@ -1007,9 +879,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8c29071cb415..24cb1b1f21f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1284,7 +1284,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1427,7 +1427,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
- struct iwl_rxq *rxq, int i)
+ struct iwl_rxq *rxq, int i,
+ bool *join)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -1441,10 +1442,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
vid = le16_to_cpu(rxq->cd[i].rbid);
- else
+ *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else {
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ }
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
@@ -1502,6 +1505,7 @@ restart:
u32 rb_pending_alloc =
atomic_read(&trans_pcie->rba.req_pending) *
RX_CLAIM_REQ_ALLOC;
+ bool join = false;
if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
!emergency)) {
@@ -1514,11 +1518,29 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
- rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
if (!rxb)
goto out;
- iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ if (unlikely(join || rxq->next_rb_is_fragment)) {
+ rxq->next_rb_is_fragment = join;
+ /*
+ * We can only get a multi-RB in the following cases:
+ * - firmware issue, sending a too big notification
+ * - sniffer mode with a large A-MSDU
+ * - large MTU frames (>2k)
+ * since the multi-RB functionality is limited to newer
+ * hardware that cannot put multiple entries into a
+ * single RB.
+ *
+ * Right now, the higher layers aren't set up to deal
+ * with that, so discard all of these.
+ */
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else {
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+ }
i = (i + 1) & (rxq->queue_size - 1);
@@ -1649,9 +1671,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans_pcie->txq[i])
+ if (!trans->txqs.txq[i])
continue;
- del_timer(&trans_pcie->txq[i]->stuck_timer);
+ del_timer(&trans->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 19a2c72081ab..97c9e9c87436 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
+ if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -262,8 +262,9 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
iwl_pcie_reset_ict(trans);
/* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index e4cbd8daa7c6..e5160d620868 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +68,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
+#include <linux/seq_file.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -1018,21 +1017,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
return ret;
}
- /* supported for 7000 only for the moment */
- if (iwlwifi_mod_params.fw_monitor &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
-
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (fw_mon->size) {
- iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- fw_mon->physical >> 4);
- iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (fw_mon->physical + fw_mon->size) >> 4);
- }
- } else if (iwl_pcie_dbg_on(trans)) {
+ if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
- }
iwl_enable_interrupts(trans);
@@ -1507,14 +1493,10 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- /*
- * Family IWL_DEVICE_FAMILY_AX210 and above persist mode is set by FW.
- */
- if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
- }
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
@@ -1922,9 +1904,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- trans_pcie->cmd_queue = trans_cfg->cmd_queue;
- trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
- trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -2217,11 +2199,10 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs,
bool freeze)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans_pcie->txq[queue];
+ struct iwl_txq *txq = trans->txqs.txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
@@ -2269,13 +2250,12 @@ next_queue:
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (i == trans_pcie->cmd_queue)
+ if (i == trans->txqs.cmd.q_id)
continue;
spin_lock_bh(&txq->lock);
@@ -2344,7 +2324,6 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2354,11 +2333,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans_pcie->queue_used))
+ if (!test_bit(txq_idx, trans->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans_pcie->txq[txq_idx];
+ txq = trans->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2406,7 +2385,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2415,9 +2393,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->cmd_queue)
+ if (cnt == trans->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans_pcie->queue_used))
+ if (!test_bit(cnt, trans->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2544,44 +2522,94 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.llseek = generic_file_llseek, \
};
-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+struct iwl_dbgfs_tx_queue_priv {
+ struct iwl_trans *trans;
+};
+
+struct iwl_dbgfs_tx_queue_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- size_t bufsz;
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
- bufsz = sizeof(char) * 75 *
- trans->trans_cfg->base_params->num_of_queues;
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+}
- if (!trans_pcie->txq_memory)
- return -EAGAIN;
+static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
+ *pos = ++state->pos;
+
+ if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ return NULL;
+
+ return state;
+}
+
+static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
+{
+ kfree(v);
+}
+
+static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
+ struct iwl_dbgfs_tx_queue_state *state = v;
+ struct iwl_trans *trans = priv->trans;
+ struct iwl_txq *txq = trans->txqs.txq[state->pos];
+
+ seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
+ (unsigned int)state->pos,
+ !!test_bit(state->pos, trans->txqs.queue_used),
+ !!test_bit(state->pos, trans->txqs.queue_stopped));
+ if (txq)
+ seq_printf(seq,
+ "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
+ txq->read_ptr, txq->write_ptr,
+ txq->need_update, txq->frozen,
+ txq->n_window, txq->ampdu);
+ else
+ seq_puts(seq, "(unallocated)");
+
+ if (state->pos == trans->txqs.cmd.q_id)
+ seq_puts(seq, " (HCMD)");
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
+ .start = iwl_dbgfs_tx_queue_seq_start,
+ .next = iwl_dbgfs_tx_queue_seq_next,
+ .stop = iwl_dbgfs_tx_queue_seq_stop,
+ .show = iwl_dbgfs_tx_queue_seq_show,
+};
+
+static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_tx_queue_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
return -ENOMEM;
- for (cnt = 0;
- cnt < trans->trans_cfg->base_params->num_of_queues;
- cnt++) {
- txq = trans_pcie->txq[cnt];
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, txq->read_ptr, txq->write_ptr,
- !!test_bit(cnt, trans_pcie->queue_used),
- !!test_bit(cnt, trans_pcie->queue_stopped),
- txq->need_update, txq->frozen,
- (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ priv->trans = inode->i_private;
+ return 0;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
@@ -2914,9 +2942,15 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+static const struct file_operations iwl_dbgfs_tx_queue_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_tx_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
.read = iwl_dbgfs_monitor_data_read,
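[The hunk above converts the tx_queue debugfs file from a one-shot preallocated buffer to the seq_file iterator API, which removes the need to size a buffer for all queues up front. A stripped-down sketch of the same pattern, with hypothetical demo_ names that are not part of the driver:]

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Iterate items 0..3; start/next return an iterator cookie or NULL at end. */
static void *demo_start(struct seq_file *s, loff_t *pos)
{
	return *pos < 4 ? pos : NULL;
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return *pos < 4 ? pos : NULL;
}

static void demo_stop(struct seq_file *s, void *v)
{
}

/* Called once per iterator position to emit one record. */
static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "item %lld\n", *(loff_t *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};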
@@ -3226,7 +3260,7 @@ static struct iwl_trans_dump_data
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3627,6 +3661,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (cfg_trans->gen2) {
+ size_t bc_tbl_size;
+
+ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
+ bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ else
+ bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+
+ trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
+ bc_tbl_size, 256, 0);
+ if (!trans_pcie->bc_pool)
+ goto out_no_pci;
+ }
+
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
if (ret)
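[For context on the bc_pool added above: dmam_pool_create() is the device-managed variant, so the pool itself is released automatically with the device, while each buffer still goes back through dma_pool_free(). A hedged sketch of that lifecycle, with demo_ names and sizes that are illustrative only:]

#include <linux/dmapool.h>
#include <linux/pci.h>

/* Allocate one 1 KiB, 256-byte-aligned DMA buffer from a managed pool,
 * mirroring how the byte-count tables above are handled. */
static int demo_bc_tbl_alloc(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = dmam_pool_create("demo:bc", &pdev->dev, 1024, 256, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return -ENOMEM;

	/* ... program 'dma' into the device ... */

	dma_pool_free(pool, vaddr, dma);
	return 0;	/* the pool itself is freed by devres */
}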
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9664dbc70ef1..7fc7542535d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,6 @@
*/
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
/*
@@ -72,12 +71,13 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
- if (!trans_pcie->txq[txq_id])
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+ if (!trans->txqs.txq[txq_id])
continue;
iwl_pcie_gen2_txq_unmap(trans, txq_id);
}
@@ -90,9 +90,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@@ -102,7 +100,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
+ num_tbs * sizeof(struct iwl_tfh_tb);
/*
* filled_tfd_size contains the number of filled bytes in the TFD.
* Dividing it by 64 will give the number of chunks to fetch
@@ -114,12 +112,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans_pcie->bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
} else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
/* Before AX210, the HW expects DW */
WARN_ON(!trans_pcie->bc_table_dword);
len = DIV_ROUND_UP(len, 4);
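[Both branches above pack the same 16-bit byte-count entry; only the unit of len differs (bytes on AX210 and later, dwords before). A hypothetical helper, not present in the driver, sketching the layout:]

/* bits 0..13 = length, bits 14..15 = number of 64-byte fetch chunks - 1 */
static inline __le16 demo_bc_ent(u16 len, u8 num_fetch_chunks)
{
	return cpu_to_le16(len | (num_fetch_chunks << 14));
}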
@@ -714,7 +716,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len;
int idx;
void *tfd;
@@ -723,7 +725,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
"queue %d out of range", txq_id))
return -EINVAL;
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -817,7 +819,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -929,7 +931,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -977,7 +979,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1054,7 +1056,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
@@ -1173,14 +1175,14 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
@@ -1222,7 +1224,9 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
}
kfree(txq->entries);
- iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
+ txq->bc_tbl.dma);
kfree(txq);
}
@@ -1236,7 +1240,6 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
*/
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
int i;
@@ -1244,7 +1247,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
"queue %d out of range", txq_id))
return;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
if (WARN_ON(!txq))
return;
@@ -1252,7 +1255,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
@@ -1261,27 +1264,38 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_gen2_txq_free_memory(trans, txq);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
- clear_bit(txq_id, trans_pcie->queue_used);
+ clear_bit(txq_id, trans->txqs.queue_used);
}
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
struct iwl_txq **intxq, int size,
unsigned int timeout)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
int ret;
- struct iwl_txq *txq;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+ } else {
+ bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+ }
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return -EINVAL;
+
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
return -ENOMEM;
- ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
- (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(struct iwl_gen3_bc_tbl) :
- sizeof(struct iwlagn_scd_bc_tbl));
- if (ret) {
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
return -ENOMEM;
@@ -1312,7 +1326,6 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_host_cmd *hcmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue_cfg_rsp *rsp;
int ret, qid;
u32 wr_ptr;
@@ -1327,20 +1340,20 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer);
- if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+ if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO;
goto error_free_resp;
}
- if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+ if (test_and_set_bit(qid, trans->txqs.queue_used)) {
WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO;
goto error_free_resp;
}
txq->id = qid;
- trans_pcie->txq[qid] = txq;
+ trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
@@ -1398,8 +1411,6 @@ error:
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue))
return;
@@ -1410,7 +1421,7 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", queue);
return;
@@ -1418,22 +1429,21 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
- iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
- trans_pcie->txq[queue] = NULL;
+ iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
+ trans->txqs.txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
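
Taken together with the allocation functions above, the dynamic (gen2/TVQM) queue lifecycle visible in this file is:

	/* sketch of the flow implied by this excerpt:
	 *
	 *   iwl_trans_pcie_dyn_txq_alloc_dma()  - DMA + struct iwl_txq setup
	 *   ...queue-config host command to fw (struct iwl_tx_queue_cfg_rsp
	 *      above is its response)...
	 *   iwl_trans_pcie_txq_alloc_response() - bind the fw-chosen qid to
	 *                                         trans->txqs.txq[qid]
	 *   iwl_trans_pcie_dyn_txq_free()       - unmap, free, clear the
	 *                                         queue_used bit
	 */
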
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
- if (!trans_pcie->txq[i])
+ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
+ if (!trans->txqs.txq[i])
continue;
iwl_pcie_gen2_txq_free(trans, i);
@@ -1442,35 +1452,34 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue;
int ret;
/* alloc and init the tx queue */
- if (!trans_pcie->txq[txq_id]) {
+ if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM;
}
- trans_pcie->txq[txq_id] = queue;
+ trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
} else {
- queue = trans_pcie->txq[txq_id];
+ queue = trans->txqs.txq[txq_id];
}
ret = iwl_pcie_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->cmd_queue));
+ (txq_id == trans->txqs.cmd.q_id));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans_pcie->queue_used);
+ trans->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans->txqs.queue_used);
return 0;
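
Note that iwl_pcie_gen2_tx_init() is deliberately reentrant: the kzalloc and iwl_pcie_txq_alloc() only run while trans->txqs.txq[txq_id] is still NULL, so across firmware restarts the queue memory is kept and only the init step is re-run:

	/* first start:     kzalloc + iwl_pcie_txq_alloc() + iwl_pcie_txq_init()
	 * later fw starts: reuse trans->txqs.txq[txq_id], iwl_pcie_txq_init() only */
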
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4582d418ba4d..5c6c3fa0d29f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -8,7 +8,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -183,8 +183,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_trans *trans = txq->trans;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -262,7 +261,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -280,7 +279,6 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -293,7 +291,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->cmd_queue &&
+ txq_id != trans->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up the NIC if it's powered down ...
@@ -324,13 +322,12 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (!test_bit(i, trans_pcie->queue_used))
+ if (!test_bit(i, trans->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
@@ -535,7 +532,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
tfd_sz = trans_pcie->tfd_size * slots_num;
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
- txq->trans_pcie = trans_pcie;
+ txq->trans = trans;
txq->n_window = slots_num;
@@ -661,14 +658,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
@@ -683,7 +680,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@@ -712,7 +709,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -722,7 +719,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
@@ -761,8 +758,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queues are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -784,9 +782,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo,
- trans_pcie->cmd_q_wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
+ trans->txqs.cmd.fifo,
+ trans->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -822,7 +820,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
if (trans->trans_cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -898,8 +896,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether, mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -923,7 +922,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -931,7 +930,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
}
}
@@ -954,10 +953,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
- bc_tbls_size *= (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210) ?
- sizeof(struct iwl_gen3_bc_tbl) :
- sizeof(struct iwlagn_scd_bc_tbl);
+ if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+ return -EINVAL;
+
+ bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
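
With AX210 devices now taking the gen2 pool path above, this legacy allocator refuses them outright and the sizing collapses to the pre-AX210 case: one byte-count table per hardware queue, each an array of __le16 entries (one per TFD slot, plus a wrap-around duplicate region in the legacy layout). Worked with illustrative numbers:

	/* assumption for illustration: base_params->num_of_queues == 31 */
	bc_tbls_size = 31 * sizeof(struct iwlagn_scd_bc_tbl);
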
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail. */
@@ -992,7 +991,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1000,14 +999,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
+ trans->txqs.txq[txq_id]->id = txq_id;
}
return 0;
@@ -1046,7 +1045,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1054,7 +1053,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -1068,7 +1067,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans_pcie->txq[txq_id]->dma_addr >> 8);
+ trans->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -1113,18 +1112,18 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release the cmd queue */
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
return;
spin_lock_bh(&txq->lock);
- if (!test_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
@@ -1176,7 +1175,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
if (iwl_queue_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans_pcie->queue_stopped)) {
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;
__skb_queue_head_init(&overflow_skbs);
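
The fragment cuts off here, but the mechanism is visible: frames that arrived while the queue was full were parked on txq->overflow_q, and once reclaim frees space above low_mark they are re-sent before the queue is woken. A sketch of the presumed continuation; iwl_pcie_get_stashed_cmd() is a hypothetical stand-in for however the driver recovers the device command it stashed in skb->cb on the way in:

	skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

	while (!skb_queue_empty(&overflow_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
		struct iwl_device_cmd *dev_cmd = iwl_pcie_get_stashed_cmd(skb);

		/* re-send through the normal tx path */
		iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
	}

	if (iwl_queue_space(trans, txq) > txq->low_mark)
		iwl_wake_queue(trans, txq);
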
@@ -1229,8 +1228,7 @@ out:
/* Set the wr_ptr of a specific device and txq */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
@@ -1290,7 +1288,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
unsigned long flags;
int nfreed = 0;
u16 r;
@@ -1302,7 +1300,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
+ WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -1364,11 +1362,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+ if (test_and_set_bit(txq_id, trans->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -1377,7 +1375,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior to configuring the cmd queue */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
@@ -1385,7 +1383,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -1455,7 +1453,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
@@ -1474,8 +1472,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
@@ -1488,8 +1485,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id]->frozen = false;
+ trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1497,7 +1494,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -1511,7 +1508,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id]->ampdu = false;
+ trans->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1531,7 +1528,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1657,7 +1654,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1665,7 +1662,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
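
Both the wide and the legacy header build the same 16-bit sequence field: queue id in the high bits, write-pointer index in the low byte. The macros are defined elsewhere in the driver; their presumed shape, for reference:

	/* assumed definitions, not part of this excerpt */
	#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
	#define INDEX_TO_SEQ(i)	((i) & 0xff)
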
@@ -1716,7 +1713,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1816,14 +1813,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
* command queue, then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->cmd_queue,
+ if (WARN(txq_id != trans->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1895,7 +1892,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
@@ -2129,7 +2126,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(txq->trans);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -2332,9 +2330,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
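
This guard closes the loop on the queue_used bitmap that the rest of the patch migrates: every path that enables a queue sets its bit, every disable path clears it, and the hot paths only test it. Summarized:

	/* trans->txqs.queue_used as the single source of truth:
	 *   test_and_set_bit()   - enable paths (txq_enable, alloc_response)
	 *   test_and_clear_bit() - disable paths (txq_disable, dyn_txq_free)
	 *   test_bit()           - fast paths (tx, reclaim), as here
	 */
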