summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorstsp <stsp@openbsd.org>2020-02-15 08:47:14 +0000
committerstsp <stsp@openbsd.org>2020-02-15 08:47:14 +0000
commit5d6d7b90d86184a19f3b12908d28b2b452375f9f (patch)
tree8702b2c4b8a876b4d7ec6000e0694a3aee2bb268
parentRemove needless #ifdef. (diff)
downloadwireguard-openbsd-5d6d7b90d86184a19f3b12908d28b2b452375f9f.tar.xz
wireguard-openbsd-5d6d7b90d86184a19f3b12908d28b2b452375f9f.zip
Add iwx(4), a new driver for Intel AX200 wifi devices.
The iwx(4) driver is based on code from iwm(4) and Linux iwlwifi. We are using a separate driver for this hardware family to avoid introducing even more complexity to iwm(4). Firmware loading and the Tx machinery in particular work differently compared to iwm(4) devices. Device firmware can be installed with fw_update(8). These devices support Tx rate scaling and Tx aggregation in firmware but we leave this disabled for now and adapt the Tx rate with MiRa/AMRR instead. Active scans (scans with probe requests) and background scans don't work yet. Otherwise this driver has the same feature set as iwm(4).
-rw-r--r--sys/dev/pci/files.pci7
-rw-r--r--sys/dev/pci/if_iwx.c7899
-rw-r--r--sys/dev/pci/if_iwxreg.h6206
-rw-r--r--sys/dev/pci/if_iwxvar.h537
4 files changed, 14648 insertions, 1 deletions
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index 029ba2f3cc3..84771454407 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -1,4 +1,4 @@
-# $OpenBSD: files.pci,v 1.343 2020/01/11 00:56:37 jsg Exp $
+# $OpenBSD: files.pci,v 1.344 2020/02/15 08:47:14 stsp Exp $
# $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
#
# Config file and device description for machine-independent PCI code.
@@ -556,6 +556,11 @@ device iwm: ifnet, wlan, firmload
attach iwm at pci
file dev/pci/if_iwm.c iwm
+# Intel Wireless WiFi 22xxx
+device iwx: ifnet, wlan, firmload
+attach iwx at pci
+file dev/pci/if_iwx.c iwx
+
# C-Media CMI8x38 Audio Chip
device cmpci {}: audio
attach cmpci at pci
diff --git a/sys/dev/pci/if_iwx.c b/sys/dev/pci/if_iwx.c
new file mode 100644
index 00000000000..b7fb06f887f
--- /dev/null
+++ b/sys/dev/pci/if_iwx.c
@@ -0,0 +1,7899 @@
+/* $OpenBSD: if_iwx.c,v 1.1 2020/02/15 08:47:14 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
+ * Author: Stefan Sperling <stsp@openbsd.org>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+
+#include <sys/refcnt.h>
+#include <sys/task.h>
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_amrr.h>
+#include <net80211/ieee80211_mira.h>
+#include <net80211/ieee80211_radiotap.h>
+
+#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
+
+#define IC2IFP(_ic_) (&(_ic_)->ic_if)
+
+#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
+#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
+
+#ifdef IWX_DEBUG
+#define DPRINTF(x) do { if (iwx_debug > 0) printf x; } while (0)
+#define DPRINTFN(n, x) do { if (iwx_debug >= (n)) printf x; } while (0)
+int iwx_debug = 1;
+#else
+#define DPRINTF(x) do { ; } while (0)
+#define DPRINTFN(n, x) do { ; } while (0)
+#endif
+
+#include <dev/pci/if_iwxreg.h>
+#include <dev/pci/if_iwxvar.h>
+
/*
 * Channel numbers as laid out in the device NVM: the first
 * IWX_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels, the rest
 * are 5 GHz channels.
 */
const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
+
+#define IWX_NUM_2GHZ_CHANNELS 14
+
/*
 * Rate table mapping each supported rate to its legacy PLCP value
 * and, where one exists, the corresponding HT (MCS) PLCP value.
 * 'rate' is in units of 500 kbit/s (so 2 == 1 Mbit/s).  Entries
 * with IWX_RATE_INVM_PLCP are HT-only MIMO rates with no legacy
 * equivalent; entries with IWX_RATE_HT_SISO_MCS_INV_PLCP are
 * legacy-only rates with no HT equivalent.
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kbit/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP value */
	uint8_t ht_plcp;	/* HT MCS PLCP value, or invalid marker */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
/* First CCK rate index in iwx_rates[]. */
#define IWX_RIDX_CCK	0
/* First OFDM rate index in iwx_rates[]; everything below is CCK. */
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
/* Rate values are in 500 kbit/s units; 12 == 6 Mbit/s, 22 == 11 Mbit/s CCK. */
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
+
/* Convert an MCS index into an iwx_rates[] index. */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};

/*
 * One NVM section image as read from device non-volatile memory;
 * see iwx_parse_nvm_sections().
 */
struct iwx_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
+
+uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
+int iwx_is_mimo_ht_plcp(uint8_t);
+int iwx_is_mimo_mcs(int);
+int iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
+int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
+int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
+int iwx_apply_debug_destination(struct iwx_softc *);
+int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
+void iwx_ctxt_info_free(struct iwx_softc *);
+void iwx_ctxt_info_free_paging(struct iwx_softc *);
+int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
+ struct iwx_context_info_dram *);
+int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
+ uint8_t *, size_t);
+int iwx_set_default_calib(struct iwx_softc *, const void *);
+void iwx_fw_info_free(struct iwx_fw_info *);
+int iwx_read_firmware(struct iwx_softc *);
+uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
+void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
+int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
+int iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
+int iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
+int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
+int iwx_nic_lock(struct iwx_softc *);
+void iwx_nic_assert_locked(struct iwx_softc *);
+void iwx_nic_unlock(struct iwx_softc *);
+void iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
+ uint32_t);
+void iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+void iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
+ bus_size_t);
+void iwx_dma_contig_free(struct iwx_dma_info *);
+int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+void iwx_disable_rx_dma(struct iwx_softc *);
+void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
+void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+void iwx_enable_rfkill_int(struct iwx_softc *);
+int iwx_check_rfkill(struct iwx_softc *);
+void iwx_enable_interrupts(struct iwx_softc *);
+void iwx_enable_fwload_interrupt(struct iwx_softc *);
+void iwx_restore_interrupts(struct iwx_softc *);
+void iwx_disable_interrupts(struct iwx_softc *);
+void iwx_ict_reset(struct iwx_softc *);
+int iwx_set_hw_ready(struct iwx_softc *);
+int iwx_prepare_card_hw(struct iwx_softc *);
+void iwx_apm_config(struct iwx_softc *);
+int iwx_apm_init(struct iwx_softc *);
+void iwx_apm_stop(struct iwx_softc *);
+int iwx_allow_mcast(struct iwx_softc *);
+void iwx_init_msix_hw(struct iwx_softc *);
+void iwx_conf_msix_hw(struct iwx_softc *, int);
+int iwx_start_hw(struct iwx_softc *);
+void iwx_stop_device(struct iwx_softc *);
+void iwx_nic_config(struct iwx_softc *);
+int iwx_nic_rx_init(struct iwx_softc *);
+int iwx_nic_init(struct iwx_softc *);
+int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
+void iwx_post_alive(struct iwx_softc *);
+void iwx_protect_session(struct iwx_softc *, struct iwx_node *, uint32_t,
+ uint32_t);
+void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
+int iwx_nvm_read_chunk(struct iwx_softc *, uint16_t, uint16_t, uint16_t,
+ uint8_t *, uint16_t *);
+int iwx_nvm_read_section(struct iwx_softc *, uint16_t, uint8_t *,
+ uint16_t *, size_t);
+void iwx_init_channel_map(struct iwx_softc *, const uint16_t * const,
+ const uint8_t *nvm_channels, int nchan);
+void iwx_setup_ht_rates(struct iwx_softc *);
+void iwx_htprot_task(void *);
+void iwx_update_htprot(struct ieee80211com *, struct ieee80211_node *);
+int iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
+ uint8_t);
+void iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
+ uint8_t);
+void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
+ uint16_t, uint16_t, int);
+#ifdef notyet
+int iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
+ uint8_t);
+void iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
+ uint8_t);
+#endif
+void iwx_ba_task(void *);
+
+int iwx_parse_nvm_data(struct iwx_softc *, const uint16_t *,
+ const uint16_t *, const uint16_t *,
+ const uint16_t *, const uint16_t *,
+ const uint16_t *, int);
+void iwx_set_hw_address_8000(struct iwx_softc *, struct iwx_nvm_data *,
+ const uint16_t *, const uint16_t *);
+int iwx_parse_nvm_sections(struct iwx_softc *, struct iwx_nvm_section *);
+int iwx_nvm_init(struct iwx_softc *);
+int iwx_load_firmware(struct iwx_softc *);
+int iwx_start_fw(struct iwx_softc *);
+int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
+int iwx_send_phy_cfg_cmd(struct iwx_softc *);
+int iwx_load_ucode_wait_alive(struct iwx_softc *);
+int iwx_send_dqa_cmd(struct iwx_softc *);
+int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
+int iwx_config_ltr(struct iwx_softc *);
+void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
+int iwx_rx_addbuf(struct iwx_softc *, int, int);
+int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
+void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
+void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, int, int, uint32_t,
+ struct ieee80211_rxinfo *, struct mbuf_list *);
+void iwx_enable_ht_cck_fallback(struct iwx_softc *, struct iwx_node *);
+void iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_node *);
+void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
+void iwx_phy_ctxt_cmd_hdr(struct iwx_softc *, struct iwx_phy_ctxt *,
+ struct iwx_phy_context_cmd *, uint32_t, uint32_t);
+void iwx_phy_ctxt_cmd_data(struct iwx_softc *, struct iwx_phy_context_cmd *,
+ struct ieee80211_channel *, uint8_t, uint8_t);
+int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
+ uint8_t, uint32_t, uint32_t);
+int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
+int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
+ const void *);
+int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
+ uint32_t *);
+int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
+ const void *, uint32_t *);
+void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
+void iwx_cmd_done(struct iwx_softc *, int, int, int);
+const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
+ struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
+void iwx_tx_update_byte_tbl(struct iwx_tx_ring *, uint16_t, uint16_t);
+int iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
+int iwx_flush_tx_path(struct iwx_softc *);
+int iwx_beacon_filter_send_cmd(struct iwx_softc *,
+ struct iwx_beacon_filter_cmd *);
+int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
+void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_power_cmd *);
+int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
+int iwx_power_update_device(struct iwx_softc *);
+int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
+int iwx_disable_beacon_filter(struct iwx_softc *);
+int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
+int iwx_add_aux_sta(struct iwx_softc *);
+int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
+int iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
+int iwx_config_umac_scan(struct iwx_softc *);
+int iwx_umac_scan(struct iwx_softc *, int);
+uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
+int iwx_rval2ridx(int);
+void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
+void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_ctx_cmd *, uint32_t);
+void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_data_sta *, int);
+int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
+int iwx_clear_statistics(struct iwx_softc *);
+int iwx_update_quotas(struct iwx_softc *, struct iwx_node *, int);
+void iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
+void iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
+int iwx_scan(struct iwx_softc *);
+int iwx_bgscan(struct ieee80211com *);
+int iwx_umac_scan_abort(struct iwx_softc *);
+int iwx_scan_abort(struct iwx_softc *);
+int iwx_enable_data_tx_queues(struct iwx_softc *);
+int iwx_auth(struct iwx_softc *);
+int iwx_deauth(struct iwx_softc *);
+int iwx_assoc(struct iwx_softc *);
+int iwx_disassoc(struct iwx_softc *);
+int iwx_run(struct iwx_softc *);
+int iwx_run_stop(struct iwx_softc *);
+struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
+void iwx_calib_timeout(void *);
+int iwx_media_change(struct ifnet *);
+void iwx_newstate_task(void *);
+int iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
+void iwx_endscan(struct iwx_softc *);
+void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
+ struct ieee80211_node *);
+int iwx_sf_config(struct iwx_softc *, int);
+int iwx_send_bt_init_conf(struct iwx_softc *);
+int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
+int iwx_init_hw(struct iwx_softc *);
+int iwx_init(struct ifnet *);
+void iwx_start(struct ifnet *);
+void iwx_stop(struct ifnet *);
+void iwx_watchdog(struct ifnet *);
+int iwx_ioctl(struct ifnet *, u_long, caddr_t);
+const char *iwx_desc_lookup(uint32_t);
+void iwx_nic_error(struct iwx_softc *);
+void iwx_nic_umac_error(struct iwx_softc *);
+int iwx_rx_pkt_valid(struct iwx_rx_packet *);
+void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
+ struct mbuf_list *);
+void iwx_notif_intr(struct iwx_softc *);
+int iwx_intr(void *);
+int iwx_intr_msix(void *);
+int iwx_match(struct device *, void *, void *);
+int iwx_preinit(struct iwx_softc *);
+void iwx_attach_hook(struct device *);
+void iwx_attach(struct device *, struct device *, void *);
+void iwx_init_task(void *);
+int iwx_activate(struct device *, int);
+int iwx_resume(struct iwx_softc *);
+
+#if NBPFILTER > 0
+void iwx_radiotap_attach(struct iwx_softc *);
+#endif
+
#ifdef notyet
/*
 * Look up the firmware-advertised version of a host command.
 * Returns IWX_FW_CMD_VER_UNKNOWN if the firmware's command version
 * table has no entry for the given group/command pair.
 */
uint8_t
iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
{
	int idx;

	for (idx = 0; idx < sc->n_cmd_versions; idx++) {
		const struct iwx_fw_cmd_version *e = &sc->cmd_versions[idx];

		if (e->group != grp || e->cmd != cmd)
			continue;
		return e->cmd_ver;
	}

	return IWX_FW_CMD_VER_UNKNOWN;
}
#endif
+
+int
+iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
+{
+ return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
+ (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
+}
+
+int
+iwx_is_mimo_mcs(int mcs)
+{
+ int ridx = iwx_mcs2ridx[mcs];
+ return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
+
+}
+
+int
+iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
+{
+ struct iwx_fw_cscheme_list *l = (void *)data;
+
+ if (dlen < sizeof(*l) ||
+ dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
+ return EINVAL;
+
+ /* we don't actually store anything for now, always use s/w crypto */
+
+ return 0;
+}
+
+int
+iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
+ const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
+{
+ int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
+ if (err) {
+ printf("%s: could not allocate context info DMA memory\n",
+ DEVNAME(sc));
+ return err;
+ }
+
+ memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
+
+ return 0;
+}
+
+void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i;
+
+ if (!dram->paging)
+ return;
+
+ /* free paging*/
+ for (i = 0; i < dram->paging_cnt; i++)
+ iwx_dma_contig_free(dram->paging);
+
+ free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
+ dram->paging_cnt = 0;
+ dram->paging = NULL;
+}
+
+int
+iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
+{
+ int i = 0;
+
+ while (start < fws->fw_count &&
+ fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
+ fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
/*
 * Load all firmware image sections (LMAC, UMAC, and paging) into
 * freshly allocated DMA memory and record their physical addresses
 * in the context info structure which will be handed to the device.
 *
 * The firmware image is laid out as:
 *   [lmac sections][separator][umac sections][separator][paging sections]
 * so umac and paging section indices into fws->fw_sect[] must skip
 * one and two separator entries, respectively.
 *
 * Returns 0 on success or ENOMEM/an allocation error.  On failure,
 * sections allocated so far remain tracked in sc->init_dram; the
 * caller (iwx_ctxt_info_init()) releases them via iwx_ctxt_info_free().
 */
int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	/* Must not be called while previous allocations are still live. */
	KASSERT(dram->paging == NULL);

	lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	umac_cnt = iwx_get_num_sections(fws, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwx_get_num_sections(fws, lmac_cnt + umac_cnt + 2);

	/* One tracking array for lmac+umac, a separate one for paging. */
	dram->fw = mallocarray(umac_cnt + lmac_cnt, sizeof(*dram->fw),
	    M_DEVBUF,  M_ZERO | M_NOWAIT);
	if (!dram->fw)
		return ENOMEM;
	dram->paging = mallocarray(paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging)
		return ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[dram->fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[dram->fw_cnt].paddr,
		    (unsigned long long)dram->fw[dram->fw_cnt].size));
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[dram->fw_cnt + 1], &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[dram->fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[dram->fw_cnt].paddr,
		    (unsigned long long)dram->fw[dram->fw_cnt].size));
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
		dram->paging_cnt++;
	}

	return 0;
}
+
+int
+iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
+ uint8_t min_power)
+{
+ struct iwx_dma_info *fw_mon = &sc->fw_mon;
+ uint32_t size = 0;
+ uint8_t power;
+ int err;
+
+ if (fw_mon->size)
+ return 0;
+
+ for (power = max_power; power >= min_power; power--) {
+ size = (1 << power);
+
+ err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
+ if (err)
+ continue;
+
+ DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
+ DEVNAME(sc), size));
+ break;
+ }
+
+ if (err) {
+ fw_mon->size = 0;
+ return err;
+ }
+
+ if (power != max_power)
+ DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
+ DEVNAME(sc), (unsigned long)(1 << (power - 10)),
+ (unsigned long)(1 << (max_power - 10))));
+
+ return 0;
+}
+
+int
+iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
+{
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (max_power > 26) {
+ DPRINTF(("%s: External buffer size for monitor is too big %d, "
+ "check the FW TLV\n", DEVNAME(sc), max_power));
+ return 0;
+ }
+
+ if (sc->fw_mon.size)
+ return 0;
+
+ return iwx_alloc_fw_monitor_block(sc, max_power, 11);
+}
+
+int
+iwx_apply_debug_destination(struct iwx_softc *sc)
+{
+ struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
+ int i, err;
+ uint8_t mon_mode, size_power, base_shift, end_shift;
+ uint32_t base_reg, end_reg;
+
+ dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
+ mon_mode = dest_v1->monitor_mode;
+ size_power = dest_v1->size_power;
+ base_reg = le32toh(dest_v1->base_reg);
+ end_reg = le32toh(dest_v1->end_reg);
+ base_shift = dest_v1->base_shift;
+ end_shift = dest_v1->end_shift;
+
+ DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
+
+ if (mon_mode == EXTERNAL_MODE) {
+ err = iwx_alloc_fw_monitor(sc, size_power);
+ if (err)
+ return err;
+ }
+
+ if (!iwx_nic_lock(sc))
+ return EBUSY;
+
+ for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
+ uint32_t addr, val;
+ uint8_t op;
+
+ addr = le32toh(dest_v1->reg_ops[i].addr);
+ val = le32toh(dest_v1->reg_ops[i].val);
+ op = dest_v1->reg_ops[i].op;
+
+ DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
+ switch (op) {
+ case CSR_ASSIGN:
+ IWX_WRITE(sc, addr, val);
+ break;
+ case CSR_SETBIT:
+ IWX_SETBITS(sc, addr, (1 << val));
+ break;
+ case CSR_CLEARBIT:
+ IWX_CLRBITS(sc, addr, (1 << val));
+ break;
+ case PRPH_ASSIGN:
+ iwx_write_prph(sc, addr, val);
+ break;
+ case PRPH_SETBIT:
+ iwx_set_bits_prph(sc, addr, (1 << val));
+ break;
+ case PRPH_CLEARBIT:
+ iwx_clear_bits_prph(sc, addr, (1 << val));
+ break;
+ case PRPH_BLOCKBIT:
+ if (iwx_read_prph(sc, addr) & (1 << val))
+ goto monitor;
+ break;
+ default:
+ DPRINTF(("%s: FW debug - unknown OP %d\n",
+ DEVNAME(sc), op));
+ break;
+ }
+ }
+
+monitor:
+ if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
+ iwx_write_prph(sc, le32toh(base_reg),
+ sc->fw_mon.paddr >> base_shift);
+ iwx_write_prph(sc, end_reg,
+ (sc->fw_mon.paddr + sc->fw_mon.size - 256)
+ >> end_shift);
+ }
+
+ iwx_nic_unlock(sc);
+ return 0;
+}
+
/*
 * Build the context info structure in DMA memory and kick off the
 * device's firmware self-load.  The context info tells the device
 * where the RX/TX rings and the firmware image sections live in
 * host memory.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or an
 * allocation error.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	int err;

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    sizeof(*ctxt_info), 0);
	if (err) {
		printf("%s: could not allocate context info DMA memory\n",
		    DEVNAME(sc));
		return err;
	}
	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* Newer device families use 2K receive buffers. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	/* The RB circular buffer size field is only 4 bits wide. */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	     IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		/*
		 * NOTE(review): on failure here (and on the EBUSY path
		 * below) ctxt_info_dma and the fw sections are not freed,
		 * unlike the iwx_init_fw_sec() error path above —
		 * presumably the caller's teardown handles this; confirm.
		 */
		err = iwx_apply_debug_destination(sc);
		if (err)
			return err;
	}

	/* kick FW self load */
	IWX_WRITE_8(sc, IWX_CSR_CTXT_INFO_BA, sc->ctxt_info_dma.paddr);
	if (!iwx_nic_lock(sc))
		return EBUSY;
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
+
+void
+iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ KASSERT(dram->fw_cnt == 0);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ iwx_dma_contig_free(&dram->fw[i]);
+
+ free(dram->fw, M_DEVBUF, dram->fw_cnt * sizeof(dram->fw[0]));
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
+void
+iwx_ctxt_info_free(struct iwx_softc *sc)
+{
+ iwx_dma_contig_free(&sc->ctxt_info_dma);
+ iwx_ctxt_info_free_fw_img(sc);
+}
+
+/*
+ * Record one firmware section parsed from a TLV.  The first 32 bits
+ * of the payload encode the device load offset; the rest is section
+ * data.  Returns EINVAL on a bad image type, a too-short payload, or
+ * a full per-image section table.
+ */
+int
+iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
+    uint8_t *data, size_t dlen)
+{
+	struct iwx_fw_sects *sects;
+	struct iwx_fw_onesect *sect;
+
+	if (type >= IWX_UCODE_TYPE_MAX)
+		return EINVAL;
+	if (dlen < sizeof(uint32_t))
+		return EINVAL;
+
+	sects = &sc->sc_fw.fw_sects[type];
+	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type,
+	    sects->fw_count));
+	if (sects->fw_count >= IWX_UCODE_SECT_MAX)
+		return EINVAL;
+
+	sect = &sects->fw_sect[sects->fw_count];
+
+	/* first 32bit are device load offset */
+	memcpy(&sect->fws_devoff, data, sizeof(uint32_t));
+
+	/* rest is data */
+	sect->fws_data = data + sizeof(uint32_t);
+	sect->fws_len = dlen - sizeof(uint32_t);
+
+	sects->fw_count++;
+	sects->fw_totlen += sect->fws_len;
+
+	return 0;
+}
+
+#define IWX_DEFAULT_SCAN_CHANNELS 40
+/* Newer firmware might support more channels. Raise this value if needed. */
+#define IWX_MAX_SCAN_CHANNELS 52 /* as of 8265-34 firmware image */
+
+/* Payload layout of an IWX_UCODE_TLV_DEF_CALIB entry: default
+ * calibration trigger settings for one ucode image type. */
+struct iwx_tlv_calib_data {
+	uint32_t ucode_type;
+	struct iwx_tlv_calib_ctrl calib;
+} __packed;
+
+/*
+ * Install the default calibration triggers advertised by a DEF_CALIB
+ * TLV for the ucode image type it names.
+ */
+int
+iwx_set_default_calib(struct iwx_softc *sc, const void *data)
+{
+	const struct iwx_tlv_calib_data *calib_tlv = data;
+	uint32_t utype = le32toh(calib_tlv->ucode_type);
+
+	if (utype >= IWX_UCODE_TYPE_MAX)
+		return EINVAL;
+
+	sc->sc_default_calib[utype].flow_trigger =
+	    calib_tlv->calib.flow_trigger;
+	sc->sc_default_calib[utype].event_trigger =
+	    calib_tlv->calib.event_trigger;
+
+	return 0;
+}
+
+/*
+ * Drop the raw firmware image and forget all parsed section pointers
+ * (which point into the raw image).  The load status field is left
+ * alone so callers waiting on it are not confused.
+ */
+void
+iwx_fw_info_free(struct iwx_fw_info *fw)
+{
+	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
+	fw->fw_rawdata = NULL;
+	fw->fw_rawsize = 0;
+	/* don't touch fw->fw_status */
+	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
+}
+
+#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
+
+/*
+ * Load the firmware image via loadfirmware(9) and parse its TLV
+ * stream into sc_fw and related softc state.  Concurrent callers
+ * serialize on fw->fw_status; on any failure all parsed state is
+ * released again and waiters are woken.  Returns 0 or errno.
+ */
+int
+iwx_read_firmware(struct iwx_softc *sc)
+{
+	struct iwx_fw_info *fw = &sc->sc_fw;
+	struct iwx_tlv_ucode_header *uhdr;
+	struct iwx_ucode_tlv tlv;
+	uint32_t tlv_type;
+	uint8_t *data;
+	int err;
+	size_t len;
+
+	if (fw->fw_status == IWX_FW_STATUS_DONE)
+		return 0;
+
+	/* Wait for any other thread already loading the firmware. */
+	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
+		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
+	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
+
+	if (fw->fw_rawdata != NULL)
+		iwx_fw_info_free(fw);
+
+	err = loadfirmware(sc->sc_fwname,
+	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
+	if (err) {
+		printf("%s: could not read firmware %s (error %d)\n",
+		    DEVNAME(sc), sc->sc_fwname, err);
+		goto out;
+	}
+
+	sc->sc_capaflags = 0;
+	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
+	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
+
+	uhdr = (void *)fw->fw_rawdata;
+	if (*(uint32_t *)fw->fw_rawdata != 0
+	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
+		printf("%s: invalid firmware %s\n",
+		    DEVNAME(sc), sc->sc_fwname);
+		err = EINVAL;
+		goto out;
+	}
+
+	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
+	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
+	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
+	    IWX_UCODE_API(le32toh(uhdr->ver)));
+	data = uhdr->data;
+	len = fw->fw_rawsize - sizeof(*uhdr);
+
+	/* Walk the TLV stream; each entry is padded to 4 bytes. */
+	while (len >= sizeof(tlv)) {
+		size_t tlv_len;
+		void *tlv_data;
+
+		memcpy(&tlv, data, sizeof(tlv));
+		tlv_len = le32toh(tlv.length);
+		tlv_type = le32toh(tlv.type);
+
+		len -= sizeof(tlv);
+		data += sizeof(tlv);
+		tlv_data = data;
+
+		if (len < tlv_len) {
+			printf("%s: firmware too short: %zu bytes\n",
+			    DEVNAME(sc), len);
+			err = EINVAL;
+			goto parse_out;
+		}
+
+		switch (tlv_type) {
+		case IWX_UCODE_TLV_PROBE_MAX_LEN:
+			if (tlv_len < sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capa_max_probe_len
+			    = le32toh(*(uint32_t *)tlv_data);
+			if (sc->sc_capa_max_probe_len >
+			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+		case IWX_UCODE_TLV_PAN:
+			if (tlv_len) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
+			break;
+		case IWX_UCODE_TLV_FLAGS:
+			if (tlv_len < sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			/*
+			 * Apparently there can be many flags, but Linux driver
+			 * parses only the first one, and so do we.
+			 *
+			 * XXX: why does this override IWX_UCODE_TLV_PAN?
+			 * Intentional or a bug?  Observations from
+			 * current firmware file:
+			 *  1) TLV_PAN is parsed first
+			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
+			 * ==> this resets TLV_PAN to itself... hnnnk
+			 */
+			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
+			break;
+		case IWX_UCODE_TLV_CSCHEME:
+			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_NUM_OF_CPU: {
+			uint32_t num_cpu;
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			num_cpu = le32toh(*(uint32_t *)tlv_data);
+			if (num_cpu < 1 || num_cpu > 2) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+		}
+		case IWX_UCODE_TLV_SEC_RT:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_SEC_INIT:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_SEC_WOWLAN:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_DEF_CALIB:
+			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			err = iwx_set_default_calib(sc, tlv_data);
+			if (err)
+				goto parse_out;
+			break;
+		case IWX_UCODE_TLV_PHY_SKU:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
+			break;
+
+		case IWX_UCODE_TLV_API_CHANGES_SET: {
+			struct iwx_ucode_api *api;
+			int idx, i;
+			if (tlv_len != sizeof(*api)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			api = (struct iwx_ucode_api *)tlv_data;
+			idx = le32toh(api->api_index);
+			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			for (i = 0; i < 32; i++) {
+				if ((le32toh(api->api_flags) & (1 << i)) == 0)
+					continue;
+				setbit(sc->sc_ucode_api, i + (32 * idx));
+			}
+			break;
+		}
+
+		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
+			struct iwx_ucode_capa *capa;
+			int idx, i;
+			if (tlv_len != sizeof(*capa)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			capa = (struct iwx_ucode_capa *)tlv_data;
+			idx = le32toh(capa->api_index);
+			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
+				/* XXX: err is not set here, so this stops
+				 * parsing without reporting failure; verify
+				 * whether skipping (break) was intended. */
+				goto parse_out;
+			}
+			for (i = 0; i < 32; i++) {
+				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
+					continue;
+				setbit(sc->sc_enabled_capa, i + (32 * idx));
+			}
+			break;
+		}
+
+		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
+		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
+			/* ignore, not used by current driver */
+			break;
+
+		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
+			err = iwx_firmware_store_section(sc,
+			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
+			    tlv_len);
+			if (err)
+				goto parse_out;
+			break;
+
+		case IWX_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			break;
+
+		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
+			if (tlv_len != sizeof(uint32_t)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			sc->sc_capa_n_scan_channels =
+			  le32toh(*(uint32_t *)tlv_data);
+			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
+				err = ERANGE;
+				goto parse_out;
+			}
+			break;
+
+		case IWX_UCODE_TLV_FW_VERSION:
+			if (tlv_len != sizeof(uint32_t) * 3) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
+			    "%u.%u.%u",
+			    le32toh(((uint32_t *)tlv_data)[0]),
+			    le32toh(((uint32_t *)tlv_data)[1]),
+			    le32toh(((uint32_t *)tlv_data)[2]));
+			break;
+
+		case IWX_UCODE_TLV_FW_DBG_DEST: {
+			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+
+			fw->dbg_dest_ver = (uint8_t *)tlv_data;
+			if (*fw->dbg_dest_ver != 0) {
+				err = EINVAL;
+				goto parse_out;
+			}
+
+			if (fw->dbg_dest_tlv_init)
+				break;
+			fw->dbg_dest_tlv_init = true;
+
+			dest_v1 = (void *)tlv_data;
+			fw->dbg_dest_tlv_v1 = dest_v1;
+			fw->n_dest_reg = tlv_len -
+			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
+			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
+			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
+			break;
+		}
+
+		case IWX_UCODE_TLV_FW_DBG_CONF: {
+			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+
+			if (!fw->dbg_dest_tlv_init ||
+			    conf->id >= nitems(fw->dbg_conf_tlv) ||
+			    fw->dbg_conf_tlv[conf->id] != NULL)
+				break;
+
+			DPRINTF(("Found debug configuration: %d\n", conf->id));
+			fw->dbg_conf_tlv[conf->id] = conf;
+			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
+			break;
+		}
+
+		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
+			struct iwx_umac_debug_addrs *dbg_ptrs =
+				(void *)tlv_data;
+
+			if (tlv_len != sizeof(*dbg_ptrs)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+				break;
+			sc->sc_uc.uc_umac_error_event_table =
+			    le32toh(dbg_ptrs->error_info_addr) &
+			    ~IWX_FW_ADDR_CACHE_CONTROL;
+			sc->sc_uc.error_event_table_tlv_status |=
+			    IWX_ERROR_EVENT_TABLE_UMAC;
+			break;
+		}
+
+		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
+			struct iwx_lmac_debug_addrs *dbg_ptrs =
+				(void *)tlv_data;
+
+			if (tlv_len != sizeof(*dbg_ptrs)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+				break;
+			sc->sc_uc.uc_lmac_error_event_table[0] =
+			    le32toh(dbg_ptrs->error_event_table_ptr) &
+			    ~IWX_FW_ADDR_CACHE_CONTROL;
+			sc->sc_uc.error_event_table_tlv_status |=
+			    IWX_ERROR_EVENT_TABLE_LMAC1;
+			break;
+		}
+
+		case IWX_UCODE_TLV_FW_MEM_SEG:
+			break;
+
+		case IWX_UCODE_TLV_CMD_VERSIONS: {
+			/*
+			 * Ignore a trailing partial entry, but keep tlv_len
+			 * intact: tlv_len determines how far the TLV stream
+			 * pointer advances below, and truncating it here
+			 * would desynchronize parsing of all following TLVs.
+			 */
+			size_t ver_len = tlv_len - (tlv_len %
+			    sizeof(struct iwx_fw_cmd_version));
+			if (sc->n_cmd_versions != 0) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			if (ver_len > sizeof(sc->cmd_versions)) {
+				err = EINVAL;
+				goto parse_out;
+			}
+			memcpy(&sc->cmd_versions[0], tlv_data, ver_len);
+			sc->n_cmd_versions =
+			    ver_len / sizeof(struct iwx_fw_cmd_version);
+			break;
+		}
+
+		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
+			break;
+
+		/* undocumented TLVs found in ax200-cc-a0-46 image */
+		case 58:
+		case 0x1000003:
+		case 0x1000004:
+			break;
+
+		default:
+			err = EINVAL;
+			goto parse_out;
+		}
+
+		/* TLV entries are padded to 32-bit alignment. */
+		len -= roundup(tlv_len, 4);
+		data += roundup(tlv_len, 4);
+	}
+
+	KASSERT(err == 0);
+
+ parse_out:
+	if (err) {
+		printf("%s: firmware parse error %d, "
+		    "section type %d\n", DEVNAME(sc), err, tlv_type);
+	}
+
+ out:
+	if (err) {
+		fw->fw_status = IWX_FW_STATUS_NONE;
+		if (fw->fw_rawdata != NULL)
+			iwx_fw_info_free(fw);
+	} else
+		fw->fw_status = IWX_FW_STATUS_DONE;
+	wakeup(&sc->sc_fw);
+
+	return err;
+}
+
+/*
+ * Read a periphery register.  The device must be held awake via
+ * iwx_nic_lock() across the access.
+ */
+uint32_t
+iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
+{
+	iwx_nic_assert_locked(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR,
+	    ((addr & 0x000fffff) | (3 << 24)));
+	IWX_BARRIER_READ_WRITE(sc);
+	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
+}
+
+/*
+ * Write a periphery register.  The device must be held awake via
+ * iwx_nic_lock() across the access.
+ */
+void
+iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+	iwx_nic_assert_locked(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR,
+	    ((addr & 0x000fffff) | (3 << 24)));
+	IWX_BARRIER_WRITE(sc);
+	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
+}
+
+/*
+ * Write a 64-bit periphery register as two consecutive 32-bit writes,
+ * low word first.
+ */
+void
+iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
+{
+	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
+	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
+}
+
+/*
+ * Read 'dwords' 32-bit words of device memory starting at 'addr'
+ * into 'buf', converting from little-endian.  Returns EBUSY if the
+ * device could not be woken up.
+ */
+int
+iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
+{
+	uint32_t *words = buf;
+	int i;
+
+	if (!iwx_nic_lock(sc))
+		return EBUSY;
+
+	IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
+	for (i = 0; i < dwords; i++)
+		words[i] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
+	iwx_nic_unlock(sc);
+
+	return 0;
+}
+
+/*
+ * Write 'dwords' 32-bit words from 'buf' into device memory starting
+ * at 'addr'.  A NULL 'buf' zero-fills instead.  Returns EBUSY if the
+ * device could not be woken up.
+ */
+int
+iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
+{
+	const uint32_t *words = buf;
+	int i;
+
+	if (!iwx_nic_lock(sc))
+		return EBUSY;
+
+	IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
+	/* WADDR auto-increments */
+	for (i = 0; i < dwords; i++)
+		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT,
+		    words != NULL ? words[i] : 0);
+	iwx_nic_unlock(sc);
+
+	return 0;
+}
+
+/* Write a single 32-bit word of device memory. */
+int
+iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+	return iwx_write_mem(sc, addr, &val, 1);
+}
+
+/*
+ * Busy-wait until (reg & mask) equals (bits & mask), polling every
+ * 10 microseconds.  'timo' is the total budget in microseconds.
+ * Returns 1 when the condition was met, 0 on timeout.
+ */
+int
+iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
+    int timo)
+{
+	for (;;) {
+		if ((IWX_READ(sc, reg) & mask) == (bits & mask))
+			return 1;
+		if (timo < 10)
+			return 0;
+		timo -= 10;
+		DELAY(10);
+	}
+}
+
+/*
+ * Request that the device stay awake for register access and wait for
+ * it to wake up.  Calls nest: sc_nic_locks counts outstanding locks
+ * and the wake request is only dropped when the counter returns to
+ * zero in iwx_nic_unlock().  Returns 1 on success, 0 if the device
+ * failed to wake up within 150ms.
+ */
+int
+iwx_nic_lock(struct iwx_softc *sc)
+{
+	if (sc->sc_nic_locks > 0) {
+		/* Re-entry: we already hold the device awake. */
+		iwx_nic_assert_locked(sc);
+		sc->sc_nic_locks++;
+		return 1; /* already locked */
+	}
+
+	/* Ask the device to stay awake. */
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	DELAY(2);
+
+	/* Poll until MAC access is granted. */
+	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
+	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
+		sc->sc_nic_locks++;
+		return 1;
+	}
+
+	printf("%s: acquiring device failed\n", DEVNAME(sc));
+	return 0;
+}
+
+/*
+ * Sanity-check that the device is awake and the NIC lock counter is
+ * positive; panics on any violation.
+ */
+void
+iwx_nic_assert_locked(struct iwx_softc *sc)
+{
+	uint32_t v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
+
+	if ((v & IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
+		panic("%s: mac clock not ready", DEVNAME(sc));
+	if (v & IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
+		panic("%s: mac gone to sleep", DEVNAME(sc));
+	if (sc->sc_nic_locks <= 0)
+		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
+}
+
+/*
+ * Drop one nesting level of the NIC lock; once the counter reaches
+ * zero, allow the device to go back to sleep.
+ */
+void
+iwx_nic_unlock(struct iwx_softc *sc)
+{
+	if (sc->sc_nic_locks <= 0) {
+		printf("%s: NIC already unlocked\n", DEVNAME(sc));
+		return;
+	}
+	if (--sc->sc_nic_locks == 0)
+		IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+/*
+ * Read-modify-write a periphery register: keep only the bits selected
+ * by 'mask', then OR in 'bits'.  Silently does nothing if the device
+ * cannot be woken up (XXX: no error path).
+ */
+void
+iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
+    uint32_t mask)
+{
+	uint32_t v;
+
+	if (!iwx_nic_lock(sc))
+		return;
+	v = (iwx_read_prph(sc, reg) & mask) | bits;
+	iwx_write_prph(sc, reg, v);
+	iwx_nic_unlock(sc);
+}
+
+/* Set 'bits' in a periphery register, leaving all other bits alone. */
+void
+iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+	iwx_set_bits_mask_prph(sc, reg, bits, ~0);
+}
+
+/* Clear 'bits' in a periphery register, leaving all other bits alone. */
+void
+iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+	iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
+}
+
+/*
+ * Allocate a physically contiguous, zeroed DMA buffer of 'size' bytes
+ * with the given alignment and record map/vaddr/paddr in 'dma'.
+ * On any failure all partially acquired resources are released via
+ * iwx_dma_contig_free().  Returns 0 or a bus_dma error.
+ */
+int
+iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
+    bus_size_t size, bus_size_t alignment)
+{
+	int nsegs, err;
+	caddr_t va;
+
+	dma->tag = tag;
+	dma->size = size;
+
+	/* One map with a single segment covering the whole buffer. */
+	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
+	    &dma->map);
+	if (err)
+		goto fail;
+
+	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
+	    BUS_DMA_NOWAIT);
+	if (err)
+		goto fail;
+
+	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
+	    BUS_DMA_NOWAIT);
+	if (err)
+		goto fail;
+	dma->vaddr = va;
+
+	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
+	    BUS_DMA_NOWAIT);
+	if (err)
+		goto fail;
+
+	/* Hand the device a zeroed buffer. */
+	memset(dma->vaddr, 0, size);
+	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
+	dma->paddr = dma->map->dm_segs[0].ds_addr;
+
+	return 0;
+
+fail:	iwx_dma_contig_free(dma);
+	return err;
+}
+
+/*
+ * Undo iwx_dma_contig_alloc() in reverse order.  Safe to call on a
+ * partially initialized 'dma' (as done from the alloc failure path):
+ * vaddr may be NULL while the map already exists.
+ */
+void
+iwx_dma_contig_free(struct iwx_dma_info *dma)
+{
+	if (dma->map != NULL) {
+		if (dma->vaddr != NULL) {
+			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
+			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(dma->tag, dma->map);
+			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
+			bus_dmamem_free(dma->tag, &dma->seg, 1);
+			dma->vaddr = NULL;
+		}
+		bus_dmamap_destroy(dma->tag, dma->map);
+		dma->map = NULL;
+	}
+}
+
+/*
+ * Allocate all DMA resources for the RX ring: the "free" and "used"
+ * receive buffer descriptor tables, the status area shared with the
+ * device, and one DMA map plus mbuf per ring slot.  On failure,
+ * everything allocated so far is released via iwx_free_rx_ring().
+ */
+int
+iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+	bus_size_t size;
+	int i, err;
+
+	ring->cur = 0;
+
+	/* Allocate RX descriptors (256-byte aligned). */
+	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
+	if (err) {
+		printf("%s: could not allocate RX ring DMA memory\n",
+		    DEVNAME(sc));
+		goto fail;
+	}
+	ring->desc = ring->free_desc_dma.vaddr;
+
+	/* Allocate RX status area (16-byte aligned). */
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
+	    sizeof(*ring->stat), 16);
+	if (err) {
+		printf("%s: could not allocate RX status DMA memory\n",
+		    DEVNAME(sc));
+		goto fail;
+	}
+	ring->stat = ring->stat_dma.vaddr;
+
+	/* "Used" descriptor table: one 32-bit entry per slot. */
+	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
+	    size, 256);
+	if (err) {
+		printf("%s: could not allocate RX ring DMA memory\n",
+		    DEVNAME(sc));
+		goto fail;
+	}
+
+	/* Create a DMA map and attach an RX buffer for every slot. */
+	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
+		struct iwx_rx_data *data = &ring->data[i];
+
+		memset(data, 0, sizeof(*data));
+		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
+		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
+		    &data->map);
+		if (err) {
+			printf("%s: could not create RX buf DMA map\n",
+			    DEVNAME(sc));
+			goto fail;
+		}
+
+		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
+		if (err)
+			goto fail;
+	}
+	return 0;
+
+fail:	iwx_free_rx_ring(sc, ring);
+	return err;
+}
+
+/*
+ * Ask the RX DMA engine to stop and poll (up to ~10ms) until it
+ * reports idle.  Does nothing if the device cannot be woken up.
+ */
+void
+iwx_disable_rx_dma(struct iwx_softc *sc)
+{
+	int n;
+
+	if (!iwx_nic_lock(sc))
+		return;
+
+	iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
+	for (n = 0; n < 1000; n++) {
+		if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
+		    IWX_RXF_DMA_IDLE)
+			break;
+		DELAY(10);
+	}
+	iwx_nic_unlock(sc);
+}
+
+/*
+ * Return the RX ring to its initial state: rewind the ring index and
+ * clear the status area shared with the device.
+ */
+void
+iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+	ring->cur = 0;
+	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
+	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
+	memset(ring->stat, 0, sizeof(*ring->stat));
+	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
+	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
+}
+
+/*
+ * Release all RX ring DMA resources: descriptor tables, status area,
+ * and any mbufs and maps still attached to ring slots.
+ */
+void
+iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+	int idx;
+
+	iwx_dma_contig_free(&ring->free_desc_dma);
+	iwx_dma_contig_free(&ring->stat_dma);
+	iwx_dma_contig_free(&ring->used_desc_dma);
+
+	for (idx = 0; idx < IWX_RX_MQ_RING_COUNT; idx++) {
+		struct iwx_rx_data *rxd = &ring->data[idx];
+
+		if (rxd->m != NULL) {
+			bus_dmamap_sync(sc->sc_dmat, rxd->map, 0,
+			    rxd->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(sc->sc_dmat, rxd->map);
+			m_freem(rxd->m);
+			rxd->m = NULL;
+		}
+		if (rxd->map != NULL)
+			bus_dmamap_destroy(sc->sc_dmat, rxd->map);
+	}
+}
+
+/*
+ * Allocate DMA resources for one TX ring: the TFD descriptor array
+ * for every ring, and additionally a byte-count table, command buffer
+ * area, and per-slot DMA maps for the rings the driver actually uses.
+ * The command queue uses a different ring length than data queues.
+ * On failure, iwx_free_tx_ring() releases what was allocated.
+ */
+int
+iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
+{
+	bus_addr_t paddr;
+	bus_size_t size;
+	int i, err, qlen;
+
+	ring->qid = qid;
+	ring->queued = 0;
+	ring->cur = 0;
+	ring->tail = 0;
+
+	if (qid == IWX_DQA_CMD_QUEUE)
+		qlen = IWX_CMD_QUEUE_SIZE;
+	else
+		qlen = IWX_TX_RING_COUNT;
+
+	/* Allocate TX descriptors (256-byte aligned). */
+	size = qlen * sizeof (struct iwx_tfh_tfd);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
+	if (err) {
+		printf("%s: could not allocate TX ring DMA memory\n",
+		    DEVNAME(sc));
+		goto fail;
+	}
+	ring->desc = ring->desc_dma.vaddr;
+
+	/*
+	 * There is no need to allocate DMA buffers for unused rings.
+	 * The hardware supports up to 31 Tx rings which is more
+	 * than we currently need.
+	 *
+	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
+	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
+	 * are sc->tqx[IWX_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
+	 * in order to provide one queue per EDCA category.
+	 *
+	 * Tx aggregation will require additional queues (one queue per TID
+	 * for which aggregation is enabled) but we do not implement this yet.
+	 */
+	if (qid > IWX_DQA_MAX_MGMT_QUEUE)
+		return 0;
+
+	/* Byte-count table the firmware reads for scheduling. */
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
+	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
+	if (err) {
+		printf("%s: could not allocate byte count table DMA memory\n",
+		    DEVNAME(sc));
+		goto fail;
+	}
+
+	/* One device command buffer per ring slot. */
+	size = qlen * sizeof(struct iwx_device_cmd);
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
+	    IWX_FIRST_TB_SIZE_ALIGN);
+	if (err) {
+		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
+		goto fail;
+	}
+	ring->cmd = ring->cmd_dma.vaddr;
+
+	/* Assign each slot its command buffer address and a DMA map. */
+	paddr = ring->cmd_dma.paddr;
+	for (i = 0; i < qlen; i++) {
+		struct iwx_tx_data *data = &ring->data[i];
+		size_t mapsize;
+
+		data->cmd_paddr = paddr;
+		paddr += sizeof(struct iwx_device_cmd);
+
+		/* FW commands may require more mapped space than packets. */
+		if (qid == IWX_DQA_CMD_QUEUE)
+			mapsize = (sizeof(struct iwx_cmd_header) +
+			    IWX_MAX_CMD_PAYLOAD_SIZE);
+		else
+			mapsize = MCLBYTES;
+		err = bus_dmamap_create(sc->sc_dmat, mapsize,
+		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
+		    &data->map);
+		if (err) {
+			printf("%s: could not create TX buf DMA map\n",
+			    DEVNAME(sc));
+			goto fail;
+		}
+	}
+	KASSERT(paddr == ring->cmd_dma.paddr + size);
+	return 0;
+
+fail:	iwx_free_tx_ring(sc, ring);
+	return err;
+}
+
+/*
+ * Return a TX ring to its empty post-init state: free any mbufs still
+ * mapped, wipe the descriptors shared with the device, and rewind all
+ * ring indices.
+ */
+void
+iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+	int idx, nslots;
+
+	if (ring->qid == IWX_DQA_CMD_QUEUE)
+		nslots = IWX_CMD_QUEUE_SIZE;
+	else
+		nslots = IWX_TX_RING_COUNT;
+
+	for (idx = 0; idx < nslots; idx++) {
+		struct iwx_tx_data *txd = &ring->data[idx];
+
+		if (txd->m != NULL) {
+			bus_dmamap_sync(sc->sc_dmat, txd->map, 0,
+			    txd->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->sc_dmat, txd->map);
+			m_freem(txd->m);
+			txd->m = NULL;
+		}
+	}
+	/* Clear TX descriptors. */
+	memset(ring->desc, 0, ring->desc_dma.size);
+	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
+	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
+	sc->qfullmsk &= ~(1 << ring->qid);
+	ring->queued = 0;
+	ring->cur = 0;
+	ring->tail = 0;
+}
+
+/*
+ * Release all DMA resources of a TX ring: descriptors, command
+ * buffers, byte-count table, and per-slot mbufs and maps.
+ */
+void
+iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+	int idx, nslots;
+
+	iwx_dma_contig_free(&ring->desc_dma);
+	iwx_dma_contig_free(&ring->cmd_dma);
+	iwx_dma_contig_free(&ring->bc_tbl);
+
+	if (ring->qid == IWX_DQA_CMD_QUEUE)
+		nslots = IWX_CMD_QUEUE_SIZE;
+	else
+		nslots = IWX_TX_RING_COUNT;
+
+	for (idx = 0; idx < nslots; idx++) {
+		struct iwx_tx_data *txd = &ring->data[idx];
+
+		if (txd->m != NULL) {
+			bus_dmamap_sync(sc->sc_dmat, txd->map, 0,
+			    txd->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->sc_dmat, txd->map);
+			m_freem(txd->m);
+			txd->m = NULL;
+		}
+		if (txd->map != NULL)
+			bus_dmamap_destroy(sc->sc_dmat, txd->map);
+	}
+}
+
+/*
+ * Unmask only the rfkill interrupt cause (the MSI and MSI-X register
+ * layouts differ) and let the rfkill switch wake the device out of L1.
+ */
+void
+iwx_enable_rfkill_int(struct iwx_softc *sc)
+{
+	if (sc->sc_msix) {
+		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+		    sc->sc_fh_init_mask);
+		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
+		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
+	} else {
+		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
+		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+	}
+
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
+}
+
+/*
+ * Sample the hardware rfkill switch and mirror its state into
+ * sc_flags.  Returns nonzero when rfkill is engaged.
+ *
+ * "documentation" is not really helpful here:
+ *  27: HW_RF_KILL_SW
+ *	Indicates state of (platform's) hardware RF-Kill switch
+ * But apparently when it's off, it's on ...
+ */
+int
+iwx_check_rfkill(struct iwx_softc *sc)
+{
+	int s, killed;
+
+	s = splnet();
+
+	/* Note the inverted sense: the bit reads clear when killed. */
+	killed = (IWX_READ(sc, IWX_CSR_GP_CNTRL) &
+	    IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
+	if (killed)
+		sc->sc_flags |= IWX_FLAG_RFKILL;
+	else
+		sc->sc_flags &= ~IWX_FLAG_RFKILL;
+
+	splx(s);
+	return killed;
+}
+
+/*
+ * Unmask the full normal set of interrupt causes.
+ */
+void
+iwx_enable_interrupts(struct iwx_softc *sc)
+{
+	if (sc->sc_msix) {
+		/*
+		 * fh/hw_mask keeps all the unmasked causes.
+		 * Unlike msi, in msix cause is enabled when it is unset.
+		 */
+		sc->sc_hw_mask = sc->sc_hw_init_mask;
+		sc->sc_fh_mask = sc->sc_fh_init_mask;
+		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~sc->sc_fh_mask);
+		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~sc->sc_hw_mask);
+	} else {
+		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
+		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+	}
+}
+
+/*
+ * Unmask only the interrupts needed while firmware is being loaded:
+ * the ALIVE cause, plus the FH RX causes that carry the ALIVE
+ * notification.
+ */
+void
+iwx_enable_fwload_interrupt(struct iwx_softc *sc)
+{
+	if (sc->sc_msix) {
+		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
+		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
+		/*
+		 * Leave all the FH causes enabled to get the ALIVE
+		 * notification.
+		 */
+		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+		    ~sc->sc_fh_init_mask);
+		sc->sc_fh_mask = sc->sc_fh_init_mask;
+	} else {
+		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
+		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+	}
+}
+
+/* Re-apply the last MSI interrupt mask stored in sc_intmask. */
+void
+iwx_restore_interrupts(struct iwx_softc *sc)
+{
+	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+}
+
+/*
+ * Mask all interrupt causes; in MSI mode also acknowledge anything
+ * still pending.
+ */
+void
+iwx_disable_interrupts(struct iwx_softc *sc)
+{
+	int s = splnet();
+
+	if (sc->sc_msix) {
+		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+		    sc->sc_fh_init_mask);
+		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+		    sc->sc_hw_init_mask);
+	} else {
+		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+		/* acknowledge all interrupts */
+		IWX_WRITE(sc, IWX_CSR_INT, ~0);
+		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
+	}
+
+	splx(s);
+}
+
+/*
+ * Reset the interrupt cause table (ICT) used with MSI interrupts:
+ * clear the table, point the device at it, and switch the driver over
+ * to ICT interrupt handling.  Interrupts are disabled for the switch
+ * and re-enabled afterwards.
+ */
+void
+iwx_ict_reset(struct iwx_softc *sc)
+{
+	iwx_disable_interrupts(sc);
+
+	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
+	sc->ict_cur = 0;
+
+	/* Set physical address of ICT (4KB aligned). */
+	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
+	    IWX_CSR_DRAM_INT_TBL_ENABLE
+	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
+	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
+	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
+
+	/* Switch to ICT interrupt mode in driver. */
+	sc->sc_flags |= IWX_FLAG_USE_ICT;
+
+	/* Acknowledge anything pending before re-enabling. */
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+	iwx_enable_interrupts(sc);
+}
+
+#define IWX_HW_READY_TIMEOUT 50
+/*
+ * Signal that the OS is ready and poll briefly for the device to
+ * acknowledge via the NIC_READY handshake bit.  Returns nonzero once
+ * the device reports ready, in which case the "OS alive" mailbox bit
+ * is also set.
+ */
+int
+iwx_set_hw_ready(struct iwx_softc *sc)
+{
+	int ready;
+
+	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+	    IWX_HW_READY_TIMEOUT);
+	if (ready)
+		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
+		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
+
+	return ready;
+}
+#undef IWX_HW_READY_TIMEOUT
+
+/*
+ * Tell the device the driver is about to use it and wait for it to
+ * become ready, retrying for up to 150ms.  Returns 0 when ready,
+ * ETIMEDOUT otherwise.
+ */
+int
+iwx_prepare_card_hw(struct iwx_softc *sc)
+{
+	int waited = 0;
+
+	if (iwx_set_hw_ready(sc))
+		return 0;
+
+	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	DELAY(1000);
+
+	/* If HW is not ready, prepare the conditions to check again */
+	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
+
+	do {
+		if (iwx_set_hw_ready(sc))
+			return 0;
+		DELAY(200);
+		waited += 200;
+	} while (waited < 150000);
+
+	return ETIMEDOUT;
+}
+
+/*
+ * Configure PCIe power-saving behavior based on the link control
+ * settings the platform programmed, and record whether Latency
+ * Tolerance Reporting (LTR) is enabled.
+ */
+void
+iwx_apm_config(struct iwx_softc *sc)
+{
+	pcireg_t lctl, cap;
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so device moves directly L0->L1;
+	 *    costs negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+	    sc->sc_cap_off + PCI_PCIE_LCSR);
+	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
+		IWX_SETBITS(sc, IWX_CSR_GIO_REG,
+		    IWX_CSR_GIO_REG_VAL_L0S_ENABLED);
+	} else {
+		IWX_CLRBITS(sc, IWX_CSR_GIO_REG,
+		    IWX_CSR_GIO_REG_VAL_L0S_ENABLED);
+	}
+
+	/* Record LTR enablement from Device Control 2. */
+	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
+	    sc->sc_cap_off + PCI_PCIE_DCSR2);
+	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
+	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
+	    DEVNAME(sc),
+	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
+	    sc->sc_ltr_enabled ? "En" : "Dis"));
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * e.g. after platform boot or shutdown.
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int
+iwx_apm_init(struct iwx_softc *sc)
+{
+	int err = 0;
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
+	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 */
+	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	iwx_apm_config(sc);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwx_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
+		printf("%s: timeout waiting for clock stabilization\n",
+		    DEVNAME(sc));
+		err = ETIMEDOUT;
+		goto out;
+	}
+ out:
+	if (err)
+		printf("%s: apm init error %d\n", DEVNAME(sc), err);
+	return err;
+}
+
+/*
+ * Move the adapter into a low-power, uninitialized state: prepare the
+ * card for power-off, stop busmaster DMA, and clear "init complete"
+ * to transition D0A* back to D0U*.
+ */
+void
+iwx_apm_stop(struct iwx_softc *sc)
+{
+	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
+	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+	DELAY(1000);
+	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	DELAY(5000);
+
+	/* stop device's busmaster DMA activity */
+	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
+
+	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
+	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
+	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
+		printf("%s: timeout waiting for master\n", DEVNAME(sc));
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+/*
+ * Perform initial MSI-X hardware configuration and capture the
+ * initial cause masks programmed by iwx_conf_msix_hw().
+ */
+void
+iwx_init_msix_hw(struct iwx_softc *sc)
+{
+	iwx_conf_msix_hw(sc, 0);
+
+	if (!sc->sc_msix)
+		return;
+
+	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
+	sc->sc_fh_mask = sc->sc_fh_init_mask;
+	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
+	sc->sc_hw_mask = sc->sc_hw_init_mask;
+}
+
+/*
+ * Program the device's MSI-X cause-to-vector routing.  With MSI-X
+ * unavailable the device is switched into MSI mode instead.  All
+ * causes are mapped onto a single vector (vector 0).  When 'stopped'
+ * is nonzero the device is not touched via periphery registers.
+ */
+void
+iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
+{
+	int vector = 0;
+
+	if (!sc->sc_msix) {
+		/* Newer chips default to MSIX. */
+		if (!stopped && iwx_nic_lock(sc)) {
+			iwx_write_prph(sc, IWX_UREG_CHICK,
+			    IWX_UREG_CHICK_MSI_ENABLE);
+			iwx_nic_unlock(sc);
+		}
+		return;
+	}
+
+	if (!stopped && iwx_nic_lock(sc)) {
+		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
+		iwx_nic_unlock(sc);
+	}
+
+	/* Disable all interrupts */
+	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
+	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
+
+	/* Map fallback-queue (command/mgmt) to a single vector */
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	/* Map RSS queue (data) to the same vector */
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+	/* Enable the RX queues cause interrupts */
+	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
+
+	/* Map non-RX causes to the same vector */
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+	/* Enable non-RX causes interrupts */
+	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
+	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
+	    IWX_MSIX_FH_INT_CAUSES_S2D |
+	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
+	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
+	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
+	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
+	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
+	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
+	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
+	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
+	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
+	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
+	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
+	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
+}
+
+/*
+ * Bring the device out of reset and into a state where firmware
+ * can be loaded: prepare the card, soft-reset it, start the APM
+ * (power management) clocks, and set up MSI-X interrupt routing.
+ * Returns 0 on success or an errno-style error.
+ */
+int
+iwx_start_hw(struct iwx_softc *sc)
+{
+	int err;
+
+	err = iwx_prepare_card_hw(sc);
+	if (err)
+		return err;
+
+	/* Reset the entire device */
+	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
+	DELAY(5000);
+
+	err = iwx_apm_init(sc);
+	if (err)
+		return err;
+
+	iwx_init_msix_hw(sc);
+
+	/* Enable RF-kill notifications and sample the current switch state. */
+	iwx_enable_rfkill_int(sc);
+	iwx_check_rfkill(sc);
+
+	return 0;
+}
+
+
+/*
+ * Stop the device: quiesce interrupts and DMA, reset rings, power the
+ * device down, and reset the on-board processor. The sequence below is
+ * order-sensitive; in particular the IVAR table must be reprogrammed
+ * after reset so the RF-kill interrupt keeps working (see comment below).
+ */
+void
+iwx_stop_device(struct iwx_softc *sc)
+{
+	int qid;
+
+	iwx_disable_interrupts(sc);
+	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
+
+	/* Quiesce DMA, then drop all pending RX and TX buffers. */
+	iwx_disable_rx_dma(sc);
+	iwx_reset_rx_ring(sc, &sc->rxq);
+	for (qid = 0; qid < nitems(sc->txq); qid++)
+		iwx_reset_tx_ring(sc, &sc->txq[qid]);
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	if (sc->sc_nic_locks > 0)
+		printf("%s: %d active NIC locks forcefully cleared\n",
+		    DEVNAME(sc), sc->sc_nic_locks);
+	sc->sc_nic_locks = 0;
+
+	/* Stop the device, and put it in low power state */
+	iwx_apm_stop(sc);
+
+	/* Reset the on-board processor. */
+	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
+	DELAY(5000);
+
+	/*
+	 * Upon stop, the IVAR table gets erased, so msi-x won't
+	 * work. This causes a bug in RF-KILL flows, since the interrupt
+	 * that enables radio won't fire on the correct irq, and the
+	 * driver won't be able to handle the interrupt.
+	 * Configure the IVAR table again after reset.
+	 */
+	iwx_conf_msix_hw(sc, 1);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * Clear the interrupt again.
+	 */
+	iwx_disable_interrupts(sc);
+
+	/* Even though we stop the HW we still want the RF kill interrupt. */
+	iwx_enable_rfkill_int(sc);
+	iwx_check_rfkill(sc);
+
+	iwx_prepare_card_hw(sc);
+
+	/* Release firmware paging DMA memory; no longer needed once stopped. */
+	iwx_ctxt_info_free_paging(sc);
+}
+
+/*
+ * Program the HW interface configuration register with the MAC
+ * revision (step/dash from CSR_HW_REV) and the radio configuration
+ * (type/step/dash from the firmware PHY config), using a
+ * read-modify-write so unrelated bits are preserved.
+ */
+void
+iwx_nic_config(struct iwx_softc *sc)
+{
+	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+	uint32_t mask, val, reg_val = 0;
+
+	/* Extract radio config fields from the firmware PHY config word. */
+	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
+	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
+	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
+	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
+	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
+	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
+
+	/* MAC revision, taken from the hardware revision register. */
+	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
+	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
+	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+	/* radio configuration */
+	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+	/* Only these fields are rewritten; everything else is preserved. */
+	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
+
+	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
+	val &= ~mask;
+	val |= reg_val;
+	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
+}
+
+/*
+ * Minimal RX-side init: only set the interrupt coalescing timer.
+ * Always returns 0; int return kept for symmetry with other init steps.
+ */
+int
+iwx_nic_rx_init(struct iwx_softc *sc)
+{
+	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
+
+	/*
+	 * We don't configure the RFH; the firmware will do that.
+	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
+	 */
+	return 0;
+}
+
+/*
+ * Initialize the NIC for firmware operation: start the APM clocks,
+ * program the HW interface configuration, and set up the RX side.
+ * Returns 0 on success or an errno-style error.
+ */
+int
+iwx_nic_init(struct iwx_softc *sc)
+{
+	int err;
+
+	/*
+	 * iwx_apm_init() can fail (its return value is checked in
+	 * iwx_start_hw()); propagate the error instead of ignoring it.
+	 */
+	err = iwx_apm_init(sc);
+	if (err)
+		return err;
+
+	iwx_nic_config(sc);
+
+	err = iwx_nic_rx_init(sc);
+	if (err)
+		return err;
+
+	/* Enable shadow registers for all relevant CSRs. */
+	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
+
+	return 0;
+}
+
+/*
+ * Map ieee80211_edca_ac categories to firmware Tx FIFO.
+ * Indexed by EDCA access category (BE, BK, VI, VO in that order).
+ */
+const uint8_t iwx_ac_to_tx_fifo[] = {
+	IWX_TX_FIFO_BE,
+	IWX_TX_FIFO_BK,
+	IWX_TX_FIFO_VI,
+	IWX_TX_FIFO_VO,
+};
+
+/*
+ * Ask the firmware to enable a Tx queue via SCD_QUEUE_CFG and verify
+ * the firmware's response against our expectations. Unlike iwlwifi,
+ * this driver does not support dynamic queue IDs: the firmware must
+ * assign exactly the queue ID we requested, with a write pointer that
+ * matches our ring's current index. Returns 0 on success, EIO if the
+ * firmware response disagrees, or an error from iwx_send_cmd().
+ */
+int
+iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
+    int num_slots)
+{
+	struct iwx_tx_queue_cfg_cmd cmd;
+	struct iwx_rx_packet *pkt;
+	struct iwx_tx_queue_cfg_rsp *resp;
+	struct iwx_host_cmd hcmd = {
+		.id = IWX_SCD_QUEUE_CFG,
+		.flags = IWX_CMD_WANT_RESP,
+		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+	};
+	struct iwx_tx_ring *ring = &sc->txq[qid];
+	int err, fwqid;
+	uint32_t wr_idx;
+	size_t resp_len;
+
+	/* Start from a clean ring; firmware expects a fresh write pointer. */
+	iwx_reset_tx_ring(sc, ring);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sta_id = sta_id;
+	cmd.tid = tid;
+	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
+	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
+	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
+	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);
+
+	hcmd.data[0] = &cmd;
+	hcmd.len[0] = sizeof(cmd);
+
+	err = iwx_send_cmd(sc, &hcmd);
+	if (err)
+		return err;
+
+	pkt = hcmd.resp_pkt;
+	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
+		err = EIO;
+		goto out;
+	}
+
+	resp_len = iwx_rx_packet_payload_len(pkt);
+	if (resp_len != sizeof(*resp)) {
+		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
+		err = EIO;
+		goto out;
+	}
+
+	resp = (void *)pkt->data;
+	fwqid = le16toh(resp->queue_number);
+	wr_idx = le16toh(resp->write_pointer);
+
+	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
+	if (fwqid != qid) {
+		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
+		err = EIO;
+		goto out;
+	}
+
+	if (wr_idx != ring->cur) {
+		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
+		err = EIO;
+		goto out;
+	}
+out:
+	iwx_free_resp(sc, &hcmd);
+	return err;
+}
+
+/*
+ * Run after the firmware has sent its ALIVE notification:
+ * reset the interrupt cause table and release the context info
+ * DMA memory used only during firmware boot.
+ */
+void
+iwx_post_alive(struct iwx_softc *sc)
+{
+	iwx_ict_reset(sc);
+	iwx_ctxt_info_free(sc);
+}
+
+/*
+ * For the high priority TE (time event) use a time event type that has
+ * similar priority to the FW's action scan priority.
+ */
+#define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
+#define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
+
+/*
+ * Send a TIME_EVENT_CMD to the firmware and parse the response.
+ * On success, records the firmware-assigned unique event ID in
+ * sc->sc_time_event_uid so the event can be removed later.
+ * Returns 0 on success, EIO on a bad/short/failed response, or an
+ * error from iwx_send_cmd().
+ */
+int
+iwx_send_time_event_cmd(struct iwx_softc *sc,
+    const struct iwx_time_event_cmd *cmd)
+{
+	struct iwx_rx_packet *pkt;
+	struct iwx_time_event_resp *resp;
+	struct iwx_host_cmd hcmd = {
+		.id = IWX_TIME_EVENT_CMD,
+		.flags = IWX_CMD_WANT_RESP,
+		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+	};
+	uint32_t resp_len;
+	int err;
+
+	hcmd.data[0] = cmd;
+	hcmd.len[0] = sizeof(*cmd);
+	err = iwx_send_cmd(sc, &hcmd);
+	if (err)
+		return err;
+
+	pkt = hcmd.resp_pkt;
+	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+		err = EIO;
+		goto out;
+	}
+
+	resp_len = iwx_rx_packet_payload_len(pkt);
+	if (resp_len != sizeof(*resp)) {
+		err = EIO;
+		goto out;
+	}
+
+	resp = (void *)pkt->data;
+	/* Status 0 means the firmware accepted the time event. */
+	if (le32toh(resp->status) == 0)
+		sc->sc_time_event_uid = le32toh(resp->unique_id);
+	else
+		err = EIO;
+out:
+	iwx_free_resp(sc, &hcmd);
+	return err;
+}
+
+/*
+ * Schedule a session-protection time event so the firmware reserves
+ * airtime for association with the given node. No-op if a time event
+ * is already active. On success, IWX_FLAG_TE_ACTIVE is set; use
+ * iwx_unprotect_session() to remove the event.
+ */
+void
+iwx_protect_session(struct iwx_softc *sc, struct iwx_node *in,
+    uint32_t duration, uint32_t max_delay)
+{
+	struct iwx_time_event_cmd time_cmd;
+
+	/* Do nothing if a time event is already scheduled. */
+	if (sc->sc_flags & IWX_FLAG_TE_ACTIVE)
+		return;
+
+	memset(&time_cmd, 0, sizeof(time_cmd));
+
+	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
+	time_cmd.id_and_color =
+	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+	time_cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+	time_cmd.apply_time = htole32(0);
+
+	time_cmd.max_frags = IWX_TE_V2_FRAG_NONE;
+	time_cmd.max_delay = htole32(max_delay);
+	/* TODO: why do we need to interval = bi if it is not periodic? */
+	time_cmd.interval = htole32(1);
+	time_cmd.duration = htole32(duration);
+	time_cmd.repeat = 1;
+	/* NOTE: IWX_T2_V2_START_IMMEDIATELY is spelled "T2" in if_iwxreg.h. */
+	time_cmd.policy
+	    = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
+	        IWX_TE_V2_NOTIF_HOST_EVENT_END |
+		IWX_T2_V2_START_IMMEDIATELY);
+
+	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
+		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
+
+	DELAY(100);
+}
+
+/*
+ * Remove the session-protection time event scheduled by
+ * iwx_protect_session(), identified by the unique ID the firmware
+ * returned at creation time. No-op if no time event is active.
+ */
+void
+iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_time_event_cmd time_cmd;
+
+	/* Do nothing if the time event has already ended. */
+	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
+		return;
+
+	memset(&time_cmd, 0, sizeof(time_cmd));
+
+	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_REMOVE);
+	time_cmd.id_and_color =
+	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+	/* ID assigned by firmware when the event was added. */
+	time_cmd.id = htole32(sc->sc_time_event_uid);
+
+	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
+		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+
+	DELAY(100);
+}
+
+/*
+ * NVM read access and content parsing. We do not support
+ * external NVM or writing NVM.
+ */
+
+/* list of NVM sections we are allowed/need to read */
+const int iwx_nvm_to_read[] = {
+	IWX_NVM_SECTION_TYPE_SW,
+	IWX_NVM_SECTION_TYPE_REGULATORY,
+	IWX_NVM_SECTION_TYPE_CALIBRATION,
+	IWX_NVM_SECTION_TYPE_PRODUCTION,
+	IWX_NVM_SECTION_TYPE_REGULATORY_SDP,
+	IWX_NVM_SECTION_TYPE_HW_8000,
+	IWX_NVM_SECTION_TYPE_MAC_OVERRIDE,
+	IWX_NVM_SECTION_TYPE_PHY_SKU,
+};
+
+/* Maximum payload requested per NVM_ACCESS_CMD read. */
+#define IWX_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
+
+#define IWX_NVM_WRITE_OPCODE 1
+#define IWX_NVM_READ_OPCODE 0
+
+/*
+ * Read one chunk of an NVM section via NVM_ACCESS_CMD.
+ * On success, copies the returned bytes into 'data' and stores the
+ * number of bytes read in *len. Returns 0, EIO on command failure,
+ * EINVAL on an inconsistent response, or an iwx_send_cmd() error.
+ */
+int
+iwx_nvm_read_chunk(struct iwx_softc *sc, uint16_t section, uint16_t offset,
+    uint16_t length, uint8_t *data, uint16_t *len)
+{
+	/*
+	 * NOTE(review): the caller-provided offset is deliberately
+	 * overridden with 0 here, so every chunk is requested from the
+	 * start of the section -- confirm this matches the AX200 NVM
+	 * access semantics.
+	 */
+	offset = 0;
+	struct iwx_nvm_access_cmd nvm_access_cmd = {
+		.offset = htole16(offset),
+		.length = htole16(length),
+		.type = htole16(section),
+		.op_code = IWX_NVM_READ_OPCODE,
+	};
+	struct iwx_nvm_access_resp *nvm_resp;
+	struct iwx_rx_packet *pkt;
+	struct iwx_host_cmd cmd = {
+		.id = IWX_NVM_ACCESS_CMD,
+		.flags = (IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL),
+		.resp_pkt_len = IWX_CMD_RESP_MAX,
+		.data = { &nvm_access_cmd, },
+	};
+	int err, offset_read;
+	size_t bytes_read;
+	uint8_t *resp_data;
+
+	cmd.len[0] = sizeof(struct iwx_nvm_access_cmd);
+
+	err = iwx_send_cmd(sc, &cmd);
+	if (err)
+		return err;
+
+	pkt = cmd.resp_pkt;
+	if (pkt->hdr.flags & IWX_CMD_FAILED_MSK) {
+		err = EIO;
+		goto exit;
+	}
+
+	/* Extract NVM response */
+	nvm_resp = (void *)pkt->data;
+	if (nvm_resp == NULL) {
+		/*
+		 * Go through 'exit' so the response buffer is freed;
+		 * a bare 'return EIO' here would leak it.
+		 */
+		err = EIO;
+		goto exit;
+	}
+
+	err = le16toh(nvm_resp->status);
+	bytes_read = le16toh(nvm_resp->length);
+	offset_read = le16toh(nvm_resp->offset);
+	resp_data = nvm_resp->data;
+	if (err) {
+		err = EINVAL;
+		goto exit;
+	}
+
+	/* The device must answer for the offset we asked about. */
+	if (offset_read != offset) {
+		err = EINVAL;
+		goto exit;
+	}
+
+	/* Never accept more data than we asked for. */
+	if (bytes_read > length) {
+		err = EINVAL;
+		goto exit;
+	}
+
+	memcpy(data + offset, resp_data, bytes_read);
+	*len = bytes_read;
+
+ exit:
+	iwx_free_resp(sc, &cmd);
+	return err;
+}
+
+/*
+ * Read an NVM section completely by fetching chunks of up to
+ * IWX_NVM_DEFAULT_CHUNK_SIZE bytes until the device returns a short
+ * chunk or 'max_len' bytes have been gathered. The total number of
+ * bytes read is returned in *len.
+ *
+ * NOTE(review): iwx_nvm_read_chunk() forces its offset argument to 0,
+ * so '*len' passed as offset below does not advance the device-side
+ * read position -- verify this is the intended AX200 behavior.
+ */
+int
+iwx_nvm_read_section(struct iwx_softc *sc, uint16_t section, uint8_t *data,
+    uint16_t *len, size_t max_len)
+{
+	uint16_t chunklen, seglen;
+	int err = 0;
+
+	chunklen = seglen = IWX_NVM_DEFAULT_CHUNK_SIZE;
+	*len = 0;
+
+	/* Read NVM chunks until exhausted (reading less than requested) */
+	while (seglen == chunklen && *len < max_len) {
+		err = iwx_nvm_read_chunk(sc,
+		    section, *len, chunklen, data, &seglen);
+		if (err)
+			return err;
+
+		*len += seglen;
+	}
+
+	return err;
+}
+
+/*
+ * Return the Tx antenna mask: antennas advertised by the firmware
+ * image, optionally restricted by the NVM's valid-antenna mask.
+ */
+uint8_t
+iwx_fw_valid_tx_ant(struct iwx_softc *sc)
+{
+	uint8_t ant;
+
+	/* Antennas the firmware PHY config declares usable for Tx. */
+	ant = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN) >>
+	    IWX_FW_PHY_CFG_TX_CHAIN_POS;
+
+	/* Intersect with the NVM's mask if the NVM provides one. */
+	if (sc->sc_nvm.valid_tx_ant != 0)
+		ant &= sc->sc_nvm.valid_tx_ant;
+
+	return ant;
+}
+
+/*
+ * Return the Rx antenna mask: antennas advertised by the firmware
+ * image, optionally restricted by the NVM's valid-antenna mask.
+ */
+uint8_t
+iwx_fw_valid_rx_ant(struct iwx_softc *sc)
+{
+	uint8_t ant;
+
+	/* Antennas the firmware PHY config declares usable for Rx. */
+	ant = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN) >>
+	    IWX_FW_PHY_CFG_RX_CHAIN_POS;
+
+	/* Intersect with the NVM's mask if the NVM provides one. */
+	if (sc->sc_nvm.valid_rx_ant != 0)
+		ant &= sc->sc_nvm.valid_rx_ant;
+
+	return ant;
+}
+
+/*
+ * Populate ic->ic_channels from the NVM's channel list and per-channel
+ * flags. Channels marked invalid (or 5GHz channels when the SKU has
+ * the 5GHz band disabled) are skipped; channels not marked active are
+ * flagged passive-scan only.
+ */
+void
+iwx_init_channel_map(struct iwx_softc *sc, const uint16_t * const nvm_ch_flags,
+    const uint8_t *nvm_channels, int nchan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_nvm_data *data = &sc->sc_nvm;
+	int ch_idx;
+	struct ieee80211_channel *channel;
+	uint16_t ch_flags;
+	int is_5ghz;
+	int flags, hw_value;
+
+	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
+		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
+
+		/* The first IWX_NUM_2GHZ_CHANNELS entries are 2GHz. */
+		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS &&
+		    !data->sku_cap_band_52GHz_enable)
+			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
+
+		if (!(ch_flags & IWX_NVM_CHANNEL_VALID))
+			continue;
+
+		hw_value = nvm_channels[ch_idx];
+		channel = &ic->ic_channels[hw_value];
+
+		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
+		if (!is_5ghz) {
+			flags = IEEE80211_CHAN_2GHZ;
+			channel->ic_flags
+			    = IEEE80211_CHAN_CCK
+			    | IEEE80211_CHAN_OFDM
+			    | IEEE80211_CHAN_DYN
+			    | IEEE80211_CHAN_2GHZ;
+		} else {
+			flags = IEEE80211_CHAN_5GHZ;
+			channel->ic_flags =
+			    IEEE80211_CHAN_A;
+		}
+		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
+
+		/* Inactive channels may only be scanned passively. */
+		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
+			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
+
+		if (data->sku_cap_11n_enable)
+			channel->ic_flags |= IEEE80211_CHAN_HT;
+	}
+}
+
+/*
+ * Advertise supported HT MCS rates. MCS 0-7 are always offered;
+ * MCS 8-15 are added only if MIMO is not disabled in the NVM SKU
+ * and at least two Rx antennas are available.
+ */
+void
+iwx_setup_ht_rates(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	uint8_t rx_ant;
+
+	/* TX is supported with the same MCS as RX. */
+	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
+
+	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
+
+	if (sc->sc_nvm.sku_cap_mimo_disable)
+		return;
+
+	/* Two Rx chains (AB or BC) are required for 2-stream rates. */
+	rx_ant = iwx_fw_valid_rx_ant(sc);
+	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
+	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
+		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
+}
+
+/* Maximum number of simultaneous Rx block-ack sessions we accept. */
+#define IWX_MAX_RX_BA_SESSIONS 16
+
+/*
+ * Start or stop an Rx block-ack (BA) session for the given TID by
+ * modifying the firmware's station entry via ADD_STA. On success the
+ * pending ADDBA request is accepted (and the session counted);
+ * otherwise it is refused. Runs in task context.
+ */
+void
+iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
+    uint16_t ssn, uint16_t winsize, int start)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_add_sta_cmd cmd;
+	struct iwx_node *in = (void *)ni;
+	int err, s;
+	uint32_t status;
+
+	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
+		ieee80211_addba_req_refuse(ic, ni, tid);
+		return;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.sta_id = IWX_STATION_ID;
+	cmd.mac_id_n_color
+	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+	cmd.add_modify = IWX_STA_MODE_MODIFY;
+
+	if (start) {
+		cmd.add_immediate_ba_tid = (uint8_t)tid;
+		cmd.add_immediate_ba_ssn = htole16(ssn);
+		cmd.rx_ba_window = htole16(winsize);
+	} else {
+		cmd.remove_immediate_ba_tid = (uint8_t)tid;
+	}
+	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
+	    IWX_STA_MODIFY_REMOVE_BA_TID;
+
+	status = IWX_ADD_STA_SUCCESS;
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
+	    &status);
+
+	/* Update net80211 state at splnet to serialize with the stack. */
+	s = splnet();
+	if (!err && (status & IWX_ADD_STA_STATUS_MASK) == IWX_ADD_STA_SUCCESS) {
+		if (start) {
+			sc->sc_rx_ba_sessions++;
+			ieee80211_addba_req_accept(ic, ni, tid);
+		} else if (sc->sc_rx_ba_sessions > 0)
+			sc->sc_rx_ba_sessions--;
+	} else if (start)
+		ieee80211_addba_req_refuse(ic, ni, tid);
+
+	splx(s);
+}
+
+/*
+ * Task handler: push updated HT protection settings for the current
+ * BSS to the firmware. Scheduled by iwx_update_htprot(); bails out
+ * early if the driver is shutting down.
+ */
+void
+iwx_htprot_task(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int err, s = splnet();
+
+	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
+		/* Drop the task reference taken by iwx_add_task(). */
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+	}
+
+	/* This call updates HT protection based on in->in_ni.ni_htop1. */
+	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
+	if (err)
+		printf("%s: could not change HT protection: error %d\n",
+		    DEVNAME(sc), err);
+
+	refcnt_rele_wake(&sc->task_refs);
+	splx(s);
+}
+
+/*
+ * This function is called by the upper layer when HT protection
+ * settings in beacons have changed. The actual firmware update is
+ * deferred to iwx_htprot_task() since it sleeps on a command.
+ */
+void
+iwx_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	struct iwx_softc *sc = ic->ic_softc;
+
+	/* assumes that ni == ic->ic_bss */
+	iwx_add_task(sc, systq, &sc->htprot_task);
+}
+
+/*
+ * Task handler: start or stop the Rx BA session described by the
+ * sc->ba_* fields (set by iwx_ampdu_rx_start()/iwx_ampdu_rx_stop()).
+ * Runs in task context because the firmware command sleeps.
+ */
+void
+iwx_ba_task(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = ic->ic_bss;
+	int s = splnet();
+
+	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
+		/* Drop the task reference taken by iwx_add_task(). */
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+	}
+
+	if (sc->ba_start)
+		iwx_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
+		    sc->ba_winsize, 1);
+	else
+		iwx_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
+
+	refcnt_rele_wake(&sc->task_refs);
+	splx(s);
+}
+
+/*
+ * This function is called by the upper layer when an ADDBA request is
+ * received from another STA and before the ADDBA response is sent.
+ * The firmware command is deferred to iwx_ba_task(). Returning EBUSY
+ * defers the ADDBA response until the task has run.
+ */
+int
+iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
+    uint8_t tid)
+{
+	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
+	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
+
+	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS)
+		return ENOSPC;
+
+	sc->ba_start = 1;
+	sc->ba_tid = tid;
+	/*
+	 * Store in host byte order; iwx_sta_rx_agg() applies htole16()
+	 * when it builds the ADD_STA command. Swapping here as well
+	 * (as the old code did) double-swapped on big-endian hosts.
+	 */
+	sc->ba_ssn = ba->ba_winstart;
+	sc->ba_winsize = ba->ba_winsize;
+	iwx_add_task(sc, systq, &sc->ba_task);
+
+	return EBUSY;
+}
+
+/*
+ * This function is called by the upper layer on teardown of an
+ * HT-immediate Block Ack agreement (eg. upon receipt of a DELBA
+ * frame). The firmware command is deferred to iwx_ba_task().
+ */
+void
+iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
+    uint8_t tid)
+{
+	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
+
+	sc->ba_start = 0;
+	sc->ba_tid = tid;
+	iwx_add_task(sc, systq, &sc->ba_task);
+}
+
+/*
+ * Determine the device MAC address: prefer the MAC-override NVM
+ * section, falling back to the OTP address in the WFMP PRPH registers
+ * if the override is reserved/invalid or absent. If neither source
+ * yields an address, data->hw_addr is zeroed and an error is printed.
+ */
+void
+iwx_set_hw_address_8000(struct iwx_softc *sc, struct iwx_nvm_data *data,
+    const uint16_t *mac_override, const uint16_t *nvm_hw)
+{
+	const uint8_t *hw_addr;
+
+	if (mac_override) {
+		/* Placeholder address meaning "no override programmed". */
+		static const uint8_t reserved_mac[] = {
+			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+		};
+
+		hw_addr = (const uint8_t *)(mac_override +
+				 IWX_MAC_ADDRESS_OVERRIDE_8000);
+
+		/*
+		 * Store the MAC address from MAO section.
+		 * No byte swapping is required in MAO section
+		 */
+		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
+
+		/*
+		 * Force the use of the OTP MAC address in case of reserved MAC
+		 * address in the NVM, or if address is given but invalid.
+		 */
+		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
+		    (memcmp(etherbroadcastaddr, data->hw_addr,
+		    sizeof(etherbroadcastaddr)) != 0) &&
+		    (memcmp(etheranyaddr, data->hw_addr,
+		    sizeof(etheranyaddr)) != 0) &&
+		    !ETHER_IS_MULTICAST(data->hw_addr))
+			return;
+	}
+
+	if (nvm_hw) {
+		/* Read the mac address from WFMP registers. */
+		uint32_t mac_addr0, mac_addr1;
+
+		if (!iwx_nic_lock(sc))
+			goto out;
+		mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
+		mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));
+		iwx_nic_unlock(sc);
+
+		/* Registers hold the address in reversed byte order. */
+		hw_addr = (const uint8_t *)&mac_addr0;
+		data->hw_addr[0] = hw_addr[3];
+		data->hw_addr[1] = hw_addr[2];
+		data->hw_addr[2] = hw_addr[1];
+		data->hw_addr[3] = hw_addr[0];
+
+		hw_addr = (const uint8_t *)&mac_addr1;
+		data->hw_addr[4] = hw_addr[1];
+		data->hw_addr[5] = hw_addr[0];
+
+		return;
+	}
+out:
+	printf("%s: mac address not found\n", DEVNAME(sc));
+	memset(data->hw_addr, 0, sizeof(data->hw_addr));
+}
+
+/*
+ * Parse the raw NVM sections into sc->sc_nvm: NVM version, radio
+ * configuration, SKU capability bits, number of MAC addresses, the
+ * MAC address itself, and the channel map. Always returns 0.
+ */
+int
+iwx_parse_nvm_data(struct iwx_softc *sc, const uint16_t *nvm_hw,
+    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
+    const uint16_t *mac_override, const uint16_t *phy_sku,
+    const uint16_t *regulatory, int n_regulatory)
+{
+	struct iwx_nvm_data *data = &sc->sc_nvm;
+	uint32_t sku, radio_cfg;
+	uint16_t lar_config, lar_offset;
+
+	data->nvm_version = le16_to_cpup(nvm_sw + IWX_NVM_VERSION);
+
+	/* Radio configuration word from the PHY SKU section. */
+	radio_cfg = le32_to_cpup((uint32_t *)(phy_sku + IWX_RADIO_CFG_8000));
+	data->radio_cfg_type = IWX_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
+	data->radio_cfg_step = IWX_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
+	data->radio_cfg_dash = IWX_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
+	data->radio_cfg_pnum = IWX_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
+	data->valid_tx_ant = IWX_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
+	data->valid_rx_ant = IWX_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
+
+	/* SKU capability bits. */
+	sku = le32_to_cpup((uint32_t *)(phy_sku + IWX_SKU_8000));
+	data->sku_cap_band_24GHz_enable = sku & IWX_NVM_SKU_CAP_BAND_24GHZ;
+	data->sku_cap_band_52GHz_enable = sku & IWX_NVM_SKU_CAP_BAND_52GHZ;
+	data->sku_cap_11n_enable = sku & IWX_NVM_SKU_CAP_11N_ENABLE;
+	data->sku_cap_mimo_disable = sku & IWX_NVM_SKU_CAP_MIMO_DISABLE;
+
+	/* LAR offset moved between NVM versions. */
+	lar_offset = data->nvm_version < 0xE39 ?
+		       IWX_NVM_LAR_OFFSET_8000_OLD :
+		       IWX_NVM_LAR_OFFSET_8000;
+
+	/* lar_config is read but not (yet) used by this driver. */
+	lar_config = le16_to_cpup(regulatory + lar_offset);
+	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWX_N_HW_ADDRS_8000);
+	iwx_set_hw_address_8000(sc, data, mac_override, nvm_hw);
+
+	iwx_init_channel_map(sc, &regulatory[IWX_NVM_CHANNELS_8000],
+	    iwx_nvm_channels_8000,
+	    MIN(n_regulatory, nitems(iwx_nvm_channels_8000)));
+
+	data->calib_version = 255;   /* TODO:
+					this value will prevent some checks from
+					failing, we need to check if this
+					field is still needed, and if it does,
+					where is it in the NVM */
+
+	return 0;
+}
+
+/*
+ * Validate that the mandatory NVM sections are present, then hand the
+ * section pointers to iwx_parse_nvm_data(). Returns ENOENT if a
+ * required section is missing.
+ */
+int
+iwx_parse_nvm_sections(struct iwx_softc *sc, struct iwx_nvm_section *sections)
+{
+	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
+	const uint16_t *regulatory = NULL;
+	int n_regulatory = 0;
+
+	/* Checking for required sections */
+
+	/* SW and REGULATORY sections are mandatory */
+	if (!sections[IWX_NVM_SECTION_TYPE_SW].data ||
+	    !sections[IWX_NVM_SECTION_TYPE_REGULATORY].data) {
+		return ENOENT;
+	}
+	/* MAC_OVERRIDE or at least HW section must exist */
+	if (!sections[IWX_NVM_SECTION_TYPE_HW_8000].data &&
+	    !sections[IWX_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+		return ENOENT;
+	}
+
+	/* PHY_SKU section is mandatory in B0 */
+	if (!sections[IWX_NVM_SECTION_TYPE_PHY_SKU].data) {
+		return ENOENT;
+	}
+
+	regulatory = (const uint16_t *)
+	    sections[IWX_NVM_SECTION_TYPE_REGULATORY].data;
+	n_regulatory = sections[IWX_NVM_SECTION_TYPE_REGULATORY].length;
+	hw = (const uint16_t *)
+	    sections[IWX_NVM_SECTION_TYPE_HW_8000].data;
+	mac_override =
+		(const uint16_t *)
+		sections[IWX_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+	phy_sku = (const uint16_t *)
+	    sections[IWX_NVM_SECTION_TYPE_PHY_SKU].data;
+
+	sw = (const uint16_t *)sections[IWX_NVM_SECTION_TYPE_SW].data;
+	calib = (const uint16_t *)
+	    sections[IWX_NVM_SECTION_TYPE_CALIBRATION].data;
+
+	/* XXX should pass in the length of every section */
+	return iwx_parse_nvm_data(sc, hw, sw, calib, mac_override,
+	    phy_sku, regulatory, n_regulatory);
+}
+
+/*
+ * Read all NVM sections we care about into temporary buffers and
+ * parse them into sc->sc_nvm. Sections that fail to read are skipped
+ * (best effort); allocation or parse failures are returned.
+ */
+int
+iwx_nvm_init(struct iwx_softc *sc)
+{
+	struct iwx_nvm_section nvm_sections[IWX_NVM_NUM_OF_SECTIONS];
+	int i, section, err;
+	uint16_t len;
+	uint8_t *buf;
+	const size_t bufsz = sc->sc_nvm_max_section_size;
+
+	memset(nvm_sections, 0, sizeof(nvm_sections));
+
+	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
+	if (buf == NULL)
+		return ENOMEM;
+
+	for (i = 0; i < nitems(iwx_nvm_to_read); i++) {
+		section = iwx_nvm_to_read[i];
+		/*
+		 * Must be strictly less than the array size; the old
+		 * "<=" assertion was off by one and would have allowed
+		 * an out-of-bounds index.
+		 */
+		KASSERT(section < nitems(nvm_sections));
+
+		err = iwx_nvm_read_section(sc, section, buf, &len, bufsz);
+		if (err) {
+			/* Skip unreadable sections; parsing may still work. */
+			err = 0;
+			continue;
+		}
+		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
+		if (nvm_sections[section].data == NULL) {
+			err = ENOMEM;
+			break;
+		}
+		memcpy(nvm_sections[section].data, buf, len);
+		nvm_sections[section].length = len;
+	}
+	free(buf, M_DEVBUF, bufsz);
+	if (err == 0)
+		err = iwx_parse_nvm_sections(sc, nvm_sections);
+
+	/* Release all per-section copies. */
+	for (i = 0; i < IWX_NVM_NUM_OF_SECTIONS; i++) {
+		if (nvm_sections[i].data != NULL)
+			free(nvm_sections[i].data, M_DEVBUF,
+			    nvm_sections[i].length);
+	}
+
+	return err;
+}
+
+/*
+ * Load the regular firmware image via the context-info mechanism and
+ * wait (up to ~1 second total) for the firmware's ALIVE interrupt.
+ * Returns 0 on success, EINVAL if the firmware never reported OK, or
+ * a tsleep error.
+ */
+int
+iwx_load_firmware(struct iwx_softc *sc)
+{
+	struct iwx_fw_sects *fws;
+	int err, w;
+
+	sc->sc_uc.uc_intr = 0;
+
+	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
+	err = iwx_ctxt_info_init(sc, fws);
+	if (err) {
+		printf("%s: could not init context info\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* wait for the firmware to load */
+	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
+		err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", MSEC_TO_NSEC(100));
+	}
+	if (err || !sc->sc_uc.uc_ok)
+		printf("%s: could not load firmware\n", DEVNAME(sc));
+	if (!sc->sc_uc.uc_ok)
+		return EINVAL;
+
+	return err;
+}
+
+/*
+ * Prepare the NIC and kick off firmware loading: init the NIC, clear
+ * stale interrupt and rfkill handshake state, enable the firmware
+ * load interrupt, then load the image.
+ */
+int
+iwx_start_fw(struct iwx_softc *sc)
+{
+	int err;
+
+	/* Acknowledge any pending interrupts before re-initializing. */
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+
+	err = iwx_nic_init(sc);
+	if (err) {
+		printf("%s: unable to init nic\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
+	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
+	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable firmware load interrupt */
+	IWX_WRITE(sc, IWX_CSR_INT, ~0);
+	iwx_enable_fwload_interrupt(sc);
+
+	return iwx_load_firmware(sc);
+}
+
+/*
+ * Tell the firmware which Tx antennas are valid for use.
+ */
+int
+iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
+{
+	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
+		.valid = htole32(valid_tx_ant),
+	};
+
+	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
+	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+/*
+ * Send the PHY configuration and default calibration triggers for
+ * the regular firmware image.
+ */
+int
+iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
+{
+	struct iwx_phy_cfg_cmd phy_cfg_cmd;
+
+	/*
+	 * Zero the command first so that padding (or any field not
+	 * explicitly set below) does not leak uninitialized kernel
+	 * stack bytes to the firmware.
+	 */
+	memset(&phy_cfg_cmd, 0, sizeof(phy_cfg_cmd));
+
+	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
+	phy_cfg_cmd.calib_control.event_trigger =
+	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
+	phy_cfg_cmd.calib_control.flow_trigger =
+	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
+
+	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
+	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+/*
+ * Enable dynamic queue allocation (DQA) mode and tell the firmware
+ * which queue the driver uses for commands.
+ */
+int
+iwx_send_dqa_cmd(struct iwx_softc *sc)
+{
+	struct iwx_dqa_enable_cmd dqa_cmd = {
+		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
+	};
+	uint32_t cmd_id;
+
+	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
+	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+}
+
+/*
+ * Read the firmware image from disk (if not already cached), start
+ * it, and run the post-ALIVE cleanup. Returns 0 on success.
+ */
+int
+iwx_load_ucode_wait_alive(struct iwx_softc *sc)
+{
+	int err;
+
+	err = iwx_read_firmware(sc);
+	if (err)
+		return err;
+
+	err = iwx_start_fw(sc);
+	if (err)
+		return err;
+
+	iwx_post_alive(sc);
+
+	return 0;
+}
+
+/*
+ * Boot the firmware and run its init phase: load the image, announce
+ * that NVM access commands follow, optionally read the NVM, signal
+ * NVM access completion, and wait for the firmware's init-complete
+ * notification. On first boot (readnvm) the MAC address from the NVM
+ * is installed if none is set yet. Returns 0 on success.
+ */
+int
+iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
+{
+	const int wait_flags = IWX_INIT_COMPLETE;
+	struct iwx_nvm_access_complete_cmd nvm_complete = {};
+	struct iwx_init_extended_cfg_cmd init_cfg = {
+		.init_flags = htole32(IWX_INIT_NVM),
+	};
+	int err;
+
+	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
+		printf("%s: radio is disabled by hardware switch\n",
+		    DEVNAME(sc));
+		return EPERM;
+	}
+
+	sc->sc_init_complete = 0;
+	err = iwx_load_ucode_wait_alive(sc);
+	if (err) {
+		printf("%s: failed to load init firmware\n", DEVNAME(sc));
+		return err;
+	}
+
+	/*
+	 * Send init config command to mark that we are sending NVM
+	 * access commands
+	 */
+	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
+	if (err)
+		return err;
+
+	if (readnvm) {
+		err = iwx_nvm_init(sc);
+		if (err) {
+			printf("%s: failed to read nvm\n", DEVNAME(sc));
+			return err;
+		}
+	}
+
+	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
+	if (err)
+		return err;
+
+	/* Wait for the init complete notification from the firmware. */
+	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
+		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
+		    SEC_TO_NSEC(2));
+		if (err)
+			return err;
+	}
+
+	/* Install the NVM MAC address unless one is already configured. */
+	if (readnvm && IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
+		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
+		    sc->sc_nvm.hw_addr);
+	return 0;
+}
+
+/*
+ * Enable Latency Tolerance Reporting if the PCIe bridge supports it;
+ * otherwise do nothing.
+ */
+int
+iwx_config_ltr(struct iwx_softc *sc)
+{
+	struct iwx_ltr_config_cmd cmd;
+
+	/* Nothing to configure when LTR is not enabled. */
+	if (!sc->sc_ltr_enabled)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE);
+
+	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Write the DMA address of the mbuf at ring slot 'idx' into the RX
+ * free descriptor table and sync it for the device. The low 12 bits
+ * of the descriptor carry the slot index.
+ */
+void
+iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
+{
+	struct iwx_rx_data *data = &ring->data[idx];
+
+	((uint64_t *)ring->desc)[idx] =
+	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
+	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
+	    idx * sizeof(uint64_t), sizeof(uint64_t),
+	    BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Allocate and DMA-map a new receive mbuf for RX ring slot 'idx' and
+ * publish it to the device via iwx_update_rx_desc(). If the slot held
+ * a buffer that has already been unloaded, a mapping failure leaves
+ * the ring inconsistent and is treated as fatal. Returns 0 on
+ * success, ENOBUFS if no mbuf/cluster is available, or a bus_dma
+ * error.
+ */
+int
+iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
+{
+	struct iwx_rx_ring *ring = &sc->rxq;
+	struct iwx_rx_data *data = &ring->data[idx];
+	struct mbuf *m;
+	int err;
+	int fatal = 0;
+
+	m = m_gethdr(M_DONTWAIT, MT_DATA);
+	if (m == NULL)
+		return ENOBUFS;
+
+	/* Pick a cluster size large enough for the requested buffer. */
+	if (size <= MCLBYTES) {
+		MCLGET(m, M_DONTWAIT);
+	} else {
+		MCLGETI(m, M_DONTWAIT, NULL, IWX_RBUF_SIZE);
+	}
+	if ((m->m_flags & M_EXT) == 0) {
+		m_freem(m);
+		return ENOBUFS;
+	}
+
+	if (data->m != NULL) {
+		/* Old buffer is gone after unload; failure below is fatal. */
+		bus_dmamap_unload(sc->sc_dmat, data->map);
+		fatal = 1;
+	}
+
+	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+	    BUS_DMA_READ|BUS_DMA_NOWAIT);
+	if (err) {
+		/* XXX */
+		if (fatal)
+			panic("%s: could not load RX mbuf", DEVNAME(sc));
+		m_freem(m);
+		return err;
+	}
+	data->m = m;
+	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
+
+	/* Update RX descriptor. */
+	iwx_update_rx_desc(sc, ring, idx);
+
+	return 0;
+}
+
+/*
+ * Derive the received signal strength (dBm) from the per-chain energy
+ * fields of an MPDU descriptor and return the strongest chain.
+ */
+int
+iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
+    struct iwx_rx_mpdu_desc *desc)
+{
+	int chain_a, chain_b;
+
+	/* A zero reading means "no measurement"; use the -256 floor. */
+	chain_a = desc->v1.energy_a ? 0 - desc->v1.energy_a : -256;
+	chain_b = desc->v1.energy_b ? 0 - desc->v1.energy_b : -256;
+
+	/* The least negative value is the strongest signal. */
+	return MAX(chain_a, chain_b);
+}
+
+/*
+ * Handle an RX_PHY notification: cache the PHY info so it can be
+ * paired with the MPDU that follows it.
+ */
+void
+iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+    struct iwx_rx_data *data)
+{
+	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
+
+	/* Sync only the payload we are about to read. */
+	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
+	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
+
+	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
+}
+
+/*
+ * Retrieve the average noise (in dBm) among receivers.
+ */
+int
+iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
+{
+	int i, v, sum, cnt;
+
+	sum = cnt = 0;
+	/* Average the beacon-silence RSSI over antennas that report. */
+	for (i = 0; i < 3; i++) {
+		v = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
+		if (v == 0)
+			continue;
+		sum += v;
+		cnt++;
+	}
+
+	/* There should be at least one antenna but check anyway. */
+	if (cnt == 0)
+		return -127;
+	return (sum / cnt) - 107;
+}
+
+/*
+ * Hand a received frame to net80211, temporarily pointing the node at
+ * the channel the frame actually arrived on. If the frame belongs to
+ * our BSS node, the node's channel is saved and restored afterwards
+ * (ieee80211_inputm() may switch ic_bss's channel during scans).
+ */
+void
+iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
+    int is_shortpre, int rate_n_flags, uint32_t device_timestamp,
+    struct ieee80211_rxinfo *rxi, struct mbuf_list *ml)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_frame *wh;
+	struct ieee80211_node *ni;
+	/*
+	 * Initialize to NULL: bss_chan is only assigned when ni is
+	 * ic_bss at entry, but ic_bss can change inside
+	 * ieee80211_inputm(), so the restore below must not read an
+	 * uninitialized pointer.
+	 */
+	struct ieee80211_channel *bss_chan = NULL;
+	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
+
+	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
+		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
+
+	wh = mtod(m, struct ieee80211_frame *);
+	ni = ieee80211_find_rxnode(ic, wh);
+	if (ni == ic->ic_bss) {
+		/*
+		 * We may switch ic_bss's channel during scans.
+		 * Record the current channel so we can restore it later.
+		 */
+		bss_chan = ni->ni_chan;
+		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
+	}
+	ni->ni_chan = &ic->ic_channels[chanidx];
+
+#if NBPFILTER > 0
+	if (sc->sc_drvbpf != NULL) {
+		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
+		uint16_t chan_flags;
+
+		tap->wr_flags = 0;
+		if (is_shortpre)
+			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+		tap->wr_chan_freq =
+		    htole16(ic->ic_channels[chanidx].ic_freq);
+		chan_flags = ic->ic_channels[chanidx].ic_flags;
+		if (ic->ic_curmode != IEEE80211_MODE_11N)
+			chan_flags &= ~IEEE80211_CHAN_HT;
+		tap->wr_chan_flags = htole16(chan_flags);
+		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
+		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
+		tap->wr_tsft = device_timestamp;
+		if (rate_n_flags & IWX_RATE_HT_MCS_RATE_CODE_MSK) {
+			uint8_t mcs = (rate_n_flags &
+			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
+			    IWX_RATE_HT_MCS_NSS_MSK));
+			tap->wr_rate = (0x80 | mcs);
+		} else {
+			uint8_t rate = (rate_n_flags &
+			    IWX_RATE_LEGACY_RATE_MSK);
+			switch (rate) {
+			/* CCK rates. */
+			case  10: tap->wr_rate =   2; break;
+			case  20: tap->wr_rate =   4; break;
+			case  55: tap->wr_rate =  11; break;
+			case 110: tap->wr_rate =  22; break;
+			/* OFDM rates. */
+			case 0xd: tap->wr_rate =  12; break;
+			case 0xf: tap->wr_rate =  18; break;
+			case 0x5: tap->wr_rate =  24; break;
+			case 0x7: tap->wr_rate =  36; break;
+			case 0x9: tap->wr_rate =  48; break;
+			case 0xb: tap->wr_rate =  72; break;
+			case 0x1: tap->wr_rate =  96; break;
+			case 0x3: tap->wr_rate = 108; break;
+			/* Unknown rate: should not happen. */
+			default:  tap->wr_rate =   0;
+			}
+		}
+
+		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
+		    m, BPF_DIRECTION_IN);
+	}
+#endif
+	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
+	/*
+	 * ieee80211_inputm() might have changed our BSS.
+	 * Restore ic_bss's channel if we are still in the same BSS.
+	 * Only restore if we actually saved a channel above.
+	 */
+	if (bss_chan != NULL && ni == ic->ic_bss &&
+	    IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
+		ni->ni_chan = bss_chan;
+	ieee80211_release_node(ic, ni);
+}
+
+/*
+ * Process one received MPDU from the multi-queue Rx path: validate the
+ * firmware Rx descriptor, strip firmware-inserted header padding, fill
+ * in rxinfo, and hand the frame to iwx_rx_frame() for net80211 input.
+ */
+void
+iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
+    size_t maxlen, struct mbuf_list *ml)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_rxinfo rxi;
+	struct iwx_rx_mpdu_desc *desc;
+	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
+	int rssi;
+	uint8_t chanidx;
+	uint16_t phy_info;
+
+	desc = (struct iwx_rx_mpdu_desc *)pktdata;
+
+	/* Drop frames with a bad CRC or which overran the Rx FIFO. */
+	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
+	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK)))
+		return; /* drop */
+
+	len = le16toh(desc->mpdu_len);
+	if (len < IEEE80211_MIN_LEN) {
+		ic->ic_stats.is_rx_tooshort++;
+		IC2IFP(ic)->if_ierrors++;
+		return;
+	}
+	/* The frame must fit in the Rx buffer following the descriptor. */
+	if (len > maxlen - sizeof(*desc)) {
+		IC2IFP(ic)->if_ierrors++;
+		return;
+	}
+
+	m->m_data = pktdata + sizeof(*desc);
+	m->m_pkthdr.len = m->m_len = len;
+
+	/* Account for padding following the frame header. */
+	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
+		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+		if (type == IEEE80211_FC0_TYPE_CTL) {
+			/* Control frames have truncated headers. */
+			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
+			case IEEE80211_FC0_SUBTYPE_CTS:
+				hdrlen = sizeof(struct ieee80211_frame_cts);
+				break;
+			case IEEE80211_FC0_SUBTYPE_ACK:
+				hdrlen = sizeof(struct ieee80211_frame_ack);
+				break;
+			default:
+				hdrlen = sizeof(struct ieee80211_frame_min);
+				break;
+			}
+		} else
+			hdrlen = ieee80211_get_hdrlen(wh);
+		/* Remove the 2-byte pad by shifting the header forward. */
+		memmove(m->m_data + 2, m->m_data, hdrlen);
+		m_adj(m, 2);
+	}
+
+	phy_info = le16toh(desc->phy_info);
+	rate_n_flags = le32toh(desc->v1.rate_n_flags);
+	chanidx = desc->v1.channel;
+	device_timestamp = desc->v1.gp2_on_air_rise;
+
+	rssi = iwx_rxmq_get_signal_strength(sc, desc);
+	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
+	rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
+
+	memset(&rxi, 0, sizeof(rxi));
+	rxi.rxi_rssi = rssi;
+	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
+
+	iwx_rx_frame(sc, m, chanidx,
+	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+	    rate_n_flags, device_timestamp, &rxi, ml);
+}
+
+/*
+ * Force this node's Tx rate control from HT MCS rates back to CCK
+ * rates, unless the BSS forbids CCK (its minimum basic rate is OFDM).
+ */
+void
+iwx_enable_ht_cck_fallback(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	uint8_t min_rval = ieee80211_min_basic_rate(ic);
+	int i;
+
+	/* Are CCK frames forbidden in our BSS? */
+	if (IWX_RVAL_IS_OFDM(min_rval))
+		return;
+
+	in->ht_force_cck = 1;
+
+	/* Hand rate control over from MiRa to AMRR. */
+	ieee80211_mira_cancel_timeouts(&in->in_mn);
+	ieee80211_mira_node_init(&in->in_mn);
+	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
+
+	/* Choose initial CCK Tx rate. */
+	ni->ni_txrate = 0;
+	for (i = 0; i < rs->rs_nrates; i++) {
+		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == min_rval) {
+			ni->ni_txrate = i;
+			break;
+		}
+	}
+}
+
+/*
+ * Handle the Tx response for a singly-transmitted frame: feed the
+ * outcome into AMRR (legacy/forced-CCK rates) or MiRa (HT rates) and
+ * count output errors.
+ */
+void
+iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+    struct iwx_node *in)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ifnet *ifp = IC2IFP(ic);
+	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
+	int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
+	int txfail;
+
+	/* Tx aggregation is not used, so only single frames are expected. */
+	KASSERT(tx_resp->frame_count == 1);
+
+	txfail = (status != IWX_TX_STATUS_SUCCESS &&
+	    status != IWX_TX_STATUS_DIRECT_DONE);
+
+	/* Update rate control statistics. */
+	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) {
+		/* Legacy (or forced-CCK) rates: AMRR bookkeeping. */
+		in->in_amn.amn_txcnt++;
+		if (in->ht_force_cck) {
+			/*
+			 * We want to move back to OFDM quickly if possible.
+			 * Only show actual Tx failures to AMRR, not retries.
+			 */
+			if (txfail)
+				in->in_amn.amn_retrycnt++;
+		} else if (tx_resp->failure_frame > 0)
+			in->in_amn.amn_retrycnt++;
+	} else if (ic->ic_fixed_mcs == -1) {
+		/* HT with automatic MCS selection: MiRa bookkeeping. */
+		in->in_mn.frames += tx_resp->frame_count;
+		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
+		in->in_mn.agglen = tx_resp->frame_count;
+		if (tx_resp->failure_frame > 0)
+			in->in_mn.retries += tx_resp->failure_frame;
+		if (txfail)
+			in->in_mn.txfail += tx_resp->frame_count;
+		if (ic->ic_state == IEEE80211_S_RUN && !in->ht_force_cck) {
+			int otxmcs = ni->ni_txmcs;
+
+			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
+
+			/* Fall back to CCK rates if MCS 0 is failing. */
+			if (txfail && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) &&
+			    otxmcs == 0 && ni->ni_txmcs == 0)
+				iwx_enable_ht_cck_fallback(sc, in);
+		}
+	}
+
+	if (txfail)
+		ifp->if_oerrors++;
+}
+
+/*
+ * Release all resources held by a completed Tx slot: sync and unload
+ * its DMA map, free the mbuf, and drop the node reference taken at
+ * transmit time. Marks the slot done so iwx_rx_tx_cmd() does not
+ * process it twice.
+ */
+void
+iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	/* Finish DMA before freeing the mbuf backing the map. */
+	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
+	    BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(sc->sc_dmat, txd->map);
+	m_freem(txd->m);
+	txd->m = NULL;
+
+	KASSERT(txd->in);
+	ieee80211_release_node(ic, &txd->in->in_ni);
+	txd->in = NULL;
+
+	KASSERT(txd->done == 0);
+	txd->done = 1;
+}
+
+/*
+ * Handle a Tx completion notification from firmware: finish the
+ * completed slot, reap any older slots whose completions were missed,
+ * and unthrottle the interface queue when the ring drains below the
+ * low watermark.
+ */
+void
+iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+    struct iwx_rx_data *data)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = IC2IFP(ic);
+	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
+	int idx = cmd_hdr->idx;
+	int qid = cmd_hdr->qid;
+	struct iwx_tx_ring *ring = &sc->txq[qid];
+	struct iwx_tx_data *txd;
+
+	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
+	    BUS_DMASYNC_POSTREAD);
+
+	sc->sc_tx_timer = 0;
+
+	txd = &ring->data[idx];
+	/* Already reaped via the missed-completion path below? */
+	if (txd->done)
+		return;
+
+	iwx_rx_tx_cmd_single(sc, pkt, txd->in);
+	iwx_txd_done(sc, txd);
+
+	/*
+	 * XXX Sometimes we miss Tx completion interrupts.
+	 * We cannot check Tx success/failure for affected frames; just free
+	 * the associated mbuf and release the associated node reference.
+	 */
+	while (ring->tail != idx) {
+		txd = &ring->data[ring->tail];
+		if (!txd->done) {
+			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
+			    __func__, ring->tail, idx));
+			iwx_txd_done(sc, txd);
+			ring->queued--;
+		}
+		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
+	}
+
+	/* The decrement below accounts for the frame completed above. */
+	if (--ring->queued < IWX_TX_RING_LOMARK) {
+		sc->qfullmsk &= ~(1 << ring->qid);
+		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
+			ifq_clr_oactive(&ifp->if_snd);
+			/*
+			 * Well, we're in interrupt context, but then again
+			 * I guess net80211 does all sorts of stunts in
+			 * interrupt context, so maybe this is no biggie.
+			 */
+			(*ifp->if_start)(ifp);
+		}
+	}
+}
+
+/*
+ * Handle a "missed beacons" notification from firmware. If too many
+ * consecutive beacons were missed, probe the AP before letting the
+ * state machine fall back to scanning.
+ */
+void
+iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+    struct iwx_rx_data *data)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
+	uint32_t nmissed;
+
+	/* Only relevant while associated in client mode. */
+	if (ic->ic_opmode != IEEE80211_M_STA ||
+	    ic->ic_state != IEEE80211_S_RUN)
+		return;
+
+	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
+	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
+
+	nmissed = le32toh(mbn->consec_missed_beacons_since_last_rx);
+	if (nmissed <= ic->ic_bmissthres || ic->ic_mgt_timer != 0)
+		return;
+
+	if (ic->ic_if.if_flags & IFF_DEBUG)
+		printf("%s: receiving no beacons from %s; checking if "
+		    "this AP is still responding to probe requests\n",
+		    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
+
+	/*
+	 * Rather than go directly to scan state, try to send a
+	 * directed probe request first. If that fails then the
+	 * state machine will drop us into scanning after timing
+	 * out waiting for a probe response.
+	 */
+	IEEE80211_SEND_MGMT(ic, ic->ic_bss,
+	    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
+}
+
+/*
+ * Add, modify or remove the binding between our MAC context and a PHY
+ * context in firmware. Returns 0 on success, EINVAL if no PHY context
+ * is set, or EIO if firmware rejected the command.
+ */
+int
+iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
+{
+	struct iwx_binding_cmd cmd;
+	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
+	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
+	uint32_t status, id_and_color;
+	int i, err;
+	int active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
+
+	/* Catch driver state machine errors early. */
+	if (action == IWX_FW_CTXT_ACTION_ADD && active)
+		panic("binding already added");
+	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
+		panic("binding already removed");
+
+	if (phyctxt == NULL) /* XXX race with iwx_stop() */
+		return EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	id_and_color = IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color);
+	cmd.id_and_color = htole32(id_and_color);
+	cmd.action = htole32(action);
+	cmd.phy = htole32(id_and_color);
+
+	/* Only the first MAC slot is used; mark the others invalid. */
+	cmd.macs[0] = htole32(mac_id);
+	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
+		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
+
+	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel))
+		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
+	else
+		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
+
+	status = 0;
+	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
+	    &cmd, &status);
+	if (err == 0 && status != 0)
+		err = EIO;
+
+	return err;
+}
+
+/*
+ * Fill in the common header fields of a PHY context command.
+ */
+void
+iwx_phy_ctxt_cmd_hdr(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
+    struct iwx_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
+	    ctxt->color));
+	cmd->action = htole32(action);
+	cmd->apply_time = htole32(apply_time);
+}
+
+/*
+ * Fill in the channel and chain configuration of a PHY context
+ * command. Firmware with the ULTRA_HB_CHANNELS capability uses the
+ * larger channel-info layout; older firmware uses the v1 layout at
+ * the same offset.
+ */
+void
+iwx_phy_ctxt_cmd_data(struct iwx_softc *sc, struct iwx_phy_context_cmd *cmd,
+    struct ieee80211_channel *chan, uint8_t chains_static,
+    uint8_t chains_dynamic)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	uint8_t band, active_cnt, idle_cnt;
+
+	band = IEEE80211_IS_CHAN_2GHZ(chan) ?
+	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
+		cmd->ci.band = band;
+		cmd->ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
+		cmd->ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
+		cmd->ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+	} else {
+		struct iwx_fw_channel_info_v1 *ci_v1 =
+		    (struct iwx_fw_channel_info_v1 *)&cmd->ci;
+		ci_v1->band = band;
+		ci_v1->channel = ieee80211_chan2ieee(ic, chan);
+		ci_v1->width = IWX_PHY_VHT_CHANNEL_MODE20;
+		ci_v1->ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+	}
+
+	/* Set the Rx chains. */
+	idle_cnt = chains_static;
+	active_cnt = chains_dynamic;
+	cmd->rxchain_info = htole32((iwx_fw_valid_rx_ant(sc) <<
+	    IWX_PHY_RX_CHAIN_VALID_POS) |
+	    (idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS) |
+	    (active_cnt << IWX_PHY_RX_CHAIN_MIMO_CNT_POS));
+
+	cmd->txchain_info = htole32(iwx_fw_valid_tx_ant(sc));
+}
+
+/*
+ * Build and send a PHY context command (add/modify/remove) for the
+ * given context's current channel and chain configuration.
+ */
+int
+iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
+    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
+    uint32_t apply_time)
+{
+	struct iwx_phy_context_cmd cmd;
+	size_t len;
+
+	iwx_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
+
+	/*
+	 * Intel resized fw_channel_info struct and neglected to resize the
+	 * phy_context_cmd struct which contains it; so magic happens with
+	 * command length adjustments at run-time... :(
+	 */
+	iwx_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
+	    chains_static, chains_dynamic);
+	len = sizeof(struct iwx_phy_context_cmd);
+	/* Shrink the command for firmware using the old v1 channel info. */
+	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
+		len -= (sizeof(struct iwx_fw_channel_info) -
+		    sizeof(struct iwx_fw_channel_info_v1));
+	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, len, &cmd);
+}
+
+/*
+ * Send a host command to the firmware command queue.
+ *
+ * Small payloads are copied into the slot's pre-allocated command
+ * buffer; payloads larger than that buffer go into a freshly
+ * allocated mbuf cluster mapped for DMA (freed in iwx_cmd_done()).
+ * Synchronous commands (no IWX_CMD_ASYNC) sleep until iwx_cmd_done()
+ * wakes us or a one-second timeout expires. With IWX_CMD_WANT_RESP
+ * the response packet is handed back via hcmd->resp_pkt and must be
+ * released by the caller with iwx_free_resp().
+ */
+int
+iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
+	struct iwx_tfh_tfd *desc;
+	struct iwx_tx_data *txdata;
+	struct iwx_device_cmd *cmd;
+	struct mbuf *m;
+	bus_addr_t paddr;
+	uint64_t addr;
+	int err = 0, i, paylen, off, s;
+	int idx, code, async, group_id;
+	size_t hdrlen, datasz;
+	uint8_t *data;
+	int generation = sc->sc_generation;
+
+	code = hcmd->id;
+	async = hcmd->flags & IWX_CMD_ASYNC;
+	idx = ring->cur;
+
+	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
+		paylen += hcmd->len[i];
+	}
+
+	/* If this command waits for a response, allocate response buffer. */
+	hcmd->resp_pkt = NULL;
+	if (hcmd->flags & IWX_CMD_WANT_RESP) {
+		uint8_t *resp_buf;
+		/* Responses can only be awaited synchronously. */
+		KASSERT(!async);
+		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
+		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
+		if (sc->sc_cmd_resp_pkt[idx] != NULL)
+			return ENOSPC;
+		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
+		    M_NOWAIT | M_ZERO);
+		if (resp_buf == NULL)
+			return ENOMEM;
+		sc->sc_cmd_resp_pkt[idx] = resp_buf;
+		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
+	} else {
+		sc->sc_cmd_resp_pkt[idx] = NULL;
+	}
+
+	s = splnet();
+
+	desc = &ring->desc[idx];
+	txdata = &ring->data[idx];
+
+	/* Wide (grouped) commands use a larger header, leaving less room. */
+	group_id = iwx_cmd_groupid(code);
+	if (group_id != 0) {
+		hdrlen = sizeof(cmd->hdr_wide);
+		datasz = sizeof(cmd->data_wide);
+	} else {
+		hdrlen = sizeof(cmd->hdr);
+		datasz = sizeof(cmd->data);
+	}
+
+	if (paylen > datasz) {
+		/* Command is too large to fit in pre-allocated space. */
+		size_t totlen = hdrlen + paylen;
+		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
+			printf("%s: firmware command too long (%zd bytes)\n",
+			    DEVNAME(sc), totlen);
+			err = EINVAL;
+			goto out;
+		}
+		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
+		if (m == NULL) {
+			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
+			    DEVNAME(sc), totlen);
+			err = ENOMEM;
+			goto out;
+		}
+		cmd = mtod(m, struct iwx_device_cmd *);
+		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
+		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+		if (err) {
+			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
+			    DEVNAME(sc), totlen);
+			m_freem(m);
+			goto out;
+		}
+		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
+		paddr = txdata->map->dm_segs[0].ds_addr;
+	} else {
+		cmd = &ring->cmd[idx];
+		paddr = txdata->cmd_paddr;
+	}
+
+	if (group_id != 0) {
+		cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
+		cmd->hdr_wide.group_id = group_id;
+		cmd->hdr_wide.qid = ring->qid;
+		cmd->hdr_wide.idx = idx;
+		cmd->hdr_wide.length = htole16(paylen);
+		cmd->hdr_wide.version = iwx_cmd_version(code);
+		data = cmd->data_wide;
+	} else {
+		cmd->hdr.code = code;
+		cmd->hdr.flags = 0;
+		cmd->hdr.qid = ring->qid;
+		cmd->hdr.idx = idx;
+		data = cmd->data;
+	}
+
+	/* Gather the (possibly scattered) payload into the command buffer. */
+	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
+		if (hcmd->len[i] == 0)
+			continue;
+		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
+		off += hcmd->len[i];
+	}
+	KASSERT(off == paylen);
+
+	/* Header and payload are contiguous: one transfer buffer suffices. */
+	desc->tbs[0].tb_len = htole16(hdrlen + paylen);
+	addr = htole64((uint64_t)paddr);
+	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
+	desc->num_tbs = 1;
+
+	if (paylen > datasz) {
+		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
+		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
+	} else {
+		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
+		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
+		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
+	}
+	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
+	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
+	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
+	/* Kick command ring. */
+	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
+	ring->queued++;
+	ring->cur = (ring->cur + 1) % IWX_CMD_QUEUE_SIZE;
+	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
+
+	if (!async) {
+		/* iwx_cmd_done() wakes us up on the descriptor's address. */
+		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
+		if (err == 0) {
+			/* if hardware is no longer up, return error */
+			if (generation != sc->sc_generation) {
+				err = ENXIO;
+				goto out;
+			}
+
+			/* Response buffer will be freed in iwx_free_resp(). */
+			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
+			sc->sc_cmd_resp_pkt[idx] = NULL;
+		} else if (generation == sc->sc_generation) {
+			/* Timed out or interrupted; reclaim the buffer. */
+			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
+			    sc->sc_cmd_resp_len[idx]);
+			sc->sc_cmd_resp_pkt[idx] = NULL;
+		}
+	}
+ out:
+	splx(s);
+
+	return err;
+}
+
+/*
+ * Convenience wrapper around iwx_send_cmd() for commands with a
+ * single payload buffer.
+ */
+int
+iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
+    uint16_t len, const void *data)
+{
+	struct iwx_host_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.id = id;
+	cmd.len[0] = len;
+	cmd.data[0] = data;
+	cmd.flags = flags;
+
+	return iwx_send_cmd(sc, &cmd);
+}
+
+/*
+ * Send a command synchronously and extract the 32-bit status word
+ * from the firmware's response. The response buffer is managed
+ * internally; callers must not set IWX_CMD_WANT_RESP themselves.
+ */
+int
+iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
+    uint32_t *status)
+{
+	struct iwx_rx_packet *pkt;
+	struct iwx_cmd_response *resp;
+	int err;
+
+	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
+	cmd->flags |= IWX_CMD_WANT_RESP;
+	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
+
+	err = iwx_send_cmd(sc, cmd);
+	if (err)
+		return err;
+
+	pkt = cmd->resp_pkt;
+	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
+		return EIO;
+
+	/* The payload must be exactly one status word. */
+	if (iwx_rx_packet_payload_len(pkt) != sizeof(*resp)) {
+		iwx_free_resp(sc, cmd);
+		return EIO;
+	}
+
+	resp = (void *)pkt->data;
+	*status = le32toh(resp->status);
+	iwx_free_resp(sc, cmd);
+	return 0;
+}
+
+/*
+ * Convenience wrapper: send a single-buffer command and return the
+ * firmware's status word via *status.
+ */
+int
+iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
+    const void *data, uint32_t *status)
+{
+	struct iwx_host_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.id = id;
+	cmd.len[0] = len;
+	cmd.data[0] = data;
+
+	return iwx_send_cmd_status(sc, &cmd, status);
+}
+
+/*
+ * Free the response buffer of a synchronous command. Only valid for
+ * commands which were sent with IWX_CMD_WANT_RESP.
+ */
+void
+iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+	KASSERT((hcmd->flags & IWX_CMD_WANT_RESP) == IWX_CMD_WANT_RESP);
+	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
+	hcmd->resp_pkt = NULL;
+}
+
+/*
+ * Handle firmware's acknowledgement of a host command: free the
+ * payload mbuf of a large command (if any) and wake the sleeper in
+ * iwx_send_cmd(), which sleeps on the descriptor's address.
+ */
+void
+iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
+{
+	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
+	struct iwx_tx_data *data;
+
+	if (qid != IWX_DQA_CMD_QUEUE) {
+		return;	/* Not a command ack. */
+	}
+
+	data = &ring->data[idx];
+
+	/* Large commands carry their payload in an mbuf; release it. */
+	if (data->m != NULL) {
+		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->sc_dmat, data->map);
+		m_freem(data->m);
+		data->m = NULL;
+	}
+	wakeup(&ring->desc[idx]);
+
+	DPRINTF(("%s: command 0x%x done\n", __func__, code));
+	if (ring->queued == 0) {
+		if (code != IWX_NVM_ACCESS_CMD)
+			DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
+				DEVNAME(sc), code));
+	} else if (ring->queued > 0)
+		/* The > 0 check guards against counter underflow. */
+		ring->queued--;
+}
+
+/*
+ * Fill in various bit for management frames, and leave them
+ * unfilled for data frames (firmware takes care of that).
+ * Return the selected TX rate.
+ */
+const struct iwx_rate *
+iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
+    struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	const struct iwx_rate *rinfo;
+	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
+	int ridx, rate_flags;
+	uint32_t flags = 0;
+
+	/* Select the rate index: fixed, HT MCS, or rate-control chosen. */
+	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+	    type != IEEE80211_FC0_TYPE_DATA) {
+		/* for non-data, use the lowest supported rate */
+		ridx = min_ridx;
+	} else if (ic->ic_fixed_mcs != -1) {
+		ridx = sc->sc_fixed_ridx;
+	} else if (ic->ic_fixed_rate != -1) {
+		ridx = sc->sc_fixed_ridx;
+	} else if ((ni->ni_flags & IEEE80211_NODE_HT) && !in->ht_force_cck) {
+		ridx = iwx_mcs2ridx[ni->ni_txmcs];
+	} else {
+		uint8_t rval;
+		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
+		ridx = iwx_rval2ridx(rval);
+		if (ridx < min_ridx)
+			ridx = min_ridx;
+	}
+
+	/* The driver picks the rate; encryption is done by the driver too. */
+	flags = (IWX_TX_FLAGS_CMD_RATE | IWX_TX_FLAGS_ENCRYPT_DIS);
+	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
+	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
+		flags |= IWX_TX_FLAGS_HIGH_PRI;
+	tx->flags = htole32(flags);
+
+	/* Translate the rate index into the firmware's rate_n_flags word. */
+	rinfo = &iwx_rates[ridx];
+	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
+		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
+	else
+		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
+	if (IWX_RIDX_IS_CCK(ridx))
+		rate_flags |= IWX_RATE_MCS_CCK_MSK;
+	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
+	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
+		rate_flags |= IWX_RATE_MCS_HT_MSK;
+		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
+	} else
+		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
+
+	return rinfo;
+}
+
+#if 0
+/*
+ * necessary only for block ack mode
+ */
+/*
+ * NOTE(review): disabled code, kept for when Tx aggregation is
+ * implemented. Updates the byte count table entry for the current
+ * Tx queue slot. Not compiled in.
+ */
+void
+iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, uint16_t byte_cnt,
+    uint16_t num_tbs)
+{
+	uint8_t filled_tfd_size, num_fetch_chunks;
+	uint16_t len = byte_cnt;
+	uint16_t bc_ent;
+	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
+
+	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
+	    num_tbs * sizeof(struct iwx_tfh_tb);
+	/*
+	 * filled_tfd_size contains the number of filled bytes in the TFD.
+	 * Dividing it by 64 will give the number of chunks to fetch
+	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
+	 * If, for example, TFD contains only 3 TBs then 32 bytes
+	 * of the TFD are used, and only one chunk of 64 bytes should
+	 * be fetched
+	 */
+	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
+
+	/* Before AX210, the HW expects DW */
+	len = howmany(len, 4);
+	bc_ent = htole16(len | (num_fetch_chunks << 12));
+	scd_bc_tbl->tfd_offset[txq->cur] = bc_ent;
+}
+#endif
+
+/*
+ * Transmit one frame. The 802.11 header is copied into the Tx command
+ * (and stripped from the mbuf); the payload is DMA-mapped separately.
+ * The mbuf is consumed on both success and failure.
+ */
+int
+iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ni;
+	struct iwx_tx_ring *ring;
+	struct iwx_tx_data *data;
+	struct iwx_tfh_tfd *desc;
+	struct iwx_device_cmd *cmd;
+	struct iwx_tx_cmd_gen2 *tx;
+	struct ieee80211_frame *wh;
+	struct ieee80211_key *k = NULL;
+	const struct iwx_rate *rinfo;
+	uint64_t paddr;
+	u_int hdrlen;
+	bus_dma_segment_t *seg;
+	uint16_t num_tbs;
+	uint8_t type;
+	int i, totlen, err, pad;
+
+	wh = mtod(m, struct ieee80211_frame *);
+	hdrlen = ieee80211_get_hdrlen(wh);
+	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+	/*
+	 * Map EDCA categories to Tx data queues.
+	 *
+	 * We use static data queue assignments even in DQA mode. We do not
+	 * need to share Tx queues between stations because we only implement
+	 * client mode; the firmware's station table contains only one entry
+	 * which represents our access point.
+	 *
+	 * Tx aggregation will require additional queues (one queue per TID
+	 * for which aggregation is enabled) but we do not implement this yet.
+	 */
+	ring = &sc->txq[IWX_DQA_MIN_MGMT_QUEUE + ac];
+	desc = &ring->desc[ring->cur];
+	memset(desc, 0, sizeof(*desc));
+	data = &ring->data[ring->cur];
+
+	cmd = &ring->cmd[ring->cur];
+	cmd->hdr.code = IWX_TX_CMD;
+	cmd->hdr.flags = 0;
+	cmd->hdr.qid = ring->qid;
+	cmd->hdr.idx = ring->cur;
+
+	tx = (void *)cmd->data;
+	memset(tx, 0, sizeof(*tx));
+
+	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);
+
+#if NBPFILTER > 0
+	if (sc->sc_drvbpf != NULL) {
+		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
+		uint16_t chan_flags;
+
+		tap->wt_flags = 0;
+		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
+		chan_flags = ni->ni_chan->ic_flags;
+		if (ic->ic_curmode != IEEE80211_MODE_11N)
+			chan_flags &= ~IEEE80211_CHAN_HT;
+		tap->wt_chan_flags = htole16(chan_flags);
+		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
+		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+		    type == IEEE80211_FC0_TYPE_DATA &&
+		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
+			tap->wt_rate = (0x80 | rinfo->ht_plcp);
+		} else
+			tap->wt_rate = rinfo->rate;
+		tap->wt_hwqueue = ac;
+		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
+		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
+			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
+
+		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
+		    m, BPF_DIRECTION_OUT);
+	}
+#endif
+
+	/* Encrypt in software; firmware encryption is disabled. */
+	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+		k = ieee80211_get_txkey(ic, wh, ni);
+		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
+			return ENOBUFS;
+		/* 802.11 header may have moved. */
+		wh = mtod(m, struct ieee80211_frame *);
+	}
+	totlen = m->m_pkthdr.len;
+
+	if (hdrlen & 3) {
+		/* First segment length must be a multiple of 4. */
+		pad = 4 - (hdrlen & 3);
+		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
+	} else
+		pad = 0;
+
+	tx->len = htole16(totlen);
+
+	/* Copy 802.11 header in TX command. */
+	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
+
+	/* Trim 802.11 header. */
+	m_adj(m, hdrlen);
+
+	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+	if (err && err != EFBIG) {
+		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
+		m_freem(m);
+		return err;
+	}
+	if (err) {
+		/* Too many DMA segments, linearize mbuf. */
+		if (m_defrag(m, M_DONTWAIT)) {
+			m_freem(m);
+			return ENOBUFS;
+		}
+		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
+		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+		if (err) {
+			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
+			    err);
+			m_freem(m);
+			return err;
+		}
+	}
+	data->m = m;
+	data->in = in;
+	data->done = 0;
+
+	/* Fill TX descriptor. TB0/TB1 cover the command, the rest payload. */
+	num_tbs = 2 + data->map->dm_nsegs;
+	desc->num_tbs = htole16(num_tbs);
+
+	/* TB0 holds the first IWX_FIRST_TB_SIZE bytes of the command. */
+	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
+	paddr = htole64(data->cmd_paddr);
+	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
+		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
+	/* TB1 holds the rest of the Tx command plus 802.11 header and pad. */
+	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
+	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
+	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
+	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
+
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
+		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
+
+	/* Other DMA segments are for data payload. */
+	seg = data->map->dm_segs;
+	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
+		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
+		paddr = htole64(seg->ds_addr);
+		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
+		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
+			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
+	}
+
+	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
+	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
+	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
+	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
+	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
+
+#if 0
+	iwx_tx_update_byte_tbl(ring, totlen, num_tbs);
+#endif
+
+	/* Kick TX ring. */
+	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
+	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);
+
+	/* Mark TX ring as full if we reach a certain threshold. */
+	if (++ring->queued > IWX_TX_RING_HIMARK) {
+		sc->qfullmsk |= 1 << ring->qid;
+	}
+
+	return 0;
+}
+
+/*
+ * Ask firmware to flush all frames pending on the Tx path
+ * for our station, across all TIDs.
+ */
+int
+iwx_flush_tx_path(struct iwx_softc *sc)
+{
+	struct iwx_tx_path_flush_cmd flush_cmd;
+	int err;
+
+	memset(&flush_cmd, 0, sizeof(flush_cmd));
+	flush_cmd.sta_id = htole32(IWX_STATION_ID);
+	flush_cmd.tid_mask = htole16(0xffff);
+
+	err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
+	    sizeof(flush_cmd), &flush_cmd);
+	if (err)
+		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
+	return err;
+}
+
+#define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+/*
+ * Send a beacon filtering configuration command to firmware.
+ */
+int
+iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
+    struct iwx_beacon_filter_cmd *cmd)
+{
+	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
+	    0, sizeof(*cmd), cmd);
+}
+
+/*
+ * Enable or disable beacon abort in firmware. Only takes effect while
+ * beacon filtering is enabled; otherwise this is a no-op.
+ */
+int
+iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
+{
+	struct iwx_beacon_filter_cmd cmd = {
+		IWX_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = htole32(1),
+		.ba_enable_beacon_abort = htole32(enable),
+	};
+
+	if (!sc->sc_bf.bf_enabled)
+		return 0;
+
+	/* Remember the setting so iwx_enable_beacon_filter() restores it. */
+	sc->sc_bf.ba_enabled = enable;
+	return iwx_beacon_filter_send_cmd(sc, &cmd);
+}
+
+/*
+ * Build a MAC power management command for the given node.
+ */
+void
+iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
+    struct iwx_mac_power_cmd *cmd)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	int dtim_period, dtim_msec, keep_alive;
+
+	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+	    in->in_color));
+	dtim_period = ni->ni_dtimperiod ? ni->ni_dtimperiod : 1;
+
+	/*
+	 * Regardless of power management state the driver must set
+	 * keep alive period. FW will use it for sending keep alive NDPs
+	 * immediately after association. Check that keep alive period
+	 * is at least 3 * DTIM.
+	 */
+	dtim_msec = dtim_period * ni->ni_intval;
+	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
+	keep_alive = roundup(keep_alive, 1000) / 1000;	/* msec -> sec */
+	cmd->keep_alive_seconds = htole16(keep_alive);
+
+	if (ic->ic_opmode != IEEE80211_M_MONITOR)
+		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+}
+
+/*
+ * Update the firmware's MAC power table and adjust beacon abort to
+ * match the resulting power management state.
+ */
+int
+iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_mac_power_cmd cmd;
+	int ba_enable, err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwx_power_build_cmd(sc, in, &cmd);
+
+	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
+	    sizeof(cmd), &cmd);
+	if (err)
+		return err;
+
+	/* Beacon abort is only useful while power management is enabled. */
+	ba_enable = !!(cmd.flags &
+	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+	return iwx_update_beacon_abort(sc, in, ba_enable);
+}
+
+/*
+ * Configure device-wide power saving.
+ */
+int
+iwx_power_update_device(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_device_power_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	/* Monitor mode must see all frames; keep power saving off there. */
+	if (ic->ic_opmode != IEEE80211_M_MONITOR)
+		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+	return iwx_send_cmd_pdu(sc, IWX_POWER_TABLE_CMD, 0, sizeof(cmd),
+	    &cmd);
+}
+
+/*
+ * Enable beacon filtering in firmware, preserving the currently
+ * configured beacon abort setting.
+ */
+int
+iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_beacon_filter_cmd cmd = {
+		IWX_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = htole32(1),
+		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
+	};
+	int err;
+
+	err = iwx_beacon_filter_send_cmd(sc, &cmd);
+	if (err == 0)
+		sc->sc_bf.bf_enabled = 1;	/* track state on success only */
+
+	return err;
+}
+
+/*
+ * Disable beacon filtering in firmware.
+ */
+int
+iwx_disable_beacon_filter(struct iwx_softc *sc)
+{
+	struct iwx_beacon_filter_cmd cmd;
+	int err;
+
+	/* An all-zero command switches beacon filtering off. */
+	memset(&cmd, 0, sizeof(cmd));
+
+	err = iwx_beacon_filter_send_cmd(sc, &cmd);
+	if (err == 0)
+		sc->sc_bf.bf_enabled = 0;	/* track state on success only */
+
+	return err;
+}
+
+/*
+ * Add (update == 0) or modify (update != 0) the firmware station
+ * entry which represents our access point (or the broadcast address
+ * in monitor mode).
+ */
+int
+iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
+{
+	struct iwx_add_sta_cmd add_sta_cmd;
+	int err;
+	uint32_t status;
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	/* Catch driver state machine errors early. */
+	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
+		panic("STA already added");
+
+	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+	add_sta_cmd.sta_id = IWX_STATION_ID;
+	add_sta_cmd.station_type = IWX_STA_LINK;
+	add_sta_cmd.mac_id_n_color
+	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+	if (!update) {
+		if (ic->ic_opmode == IEEE80211_M_MONITOR)
+			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+			    etherbroadcastaddr);
+		else
+			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+			    in->in_ni.ni_bssid);
+	}
+	add_sta_cmd.add_modify = update ? 1 : 0;
+	add_sta_cmd.station_flags_msk
+	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
+	/*
+	 * NOTE(review): presumably disables Tx on all TIDs (no
+	 * aggregation) -- confirm against the firmware API definition.
+	 */
+	add_sta_cmd.tid_disable_tx = htole16(0xffff);
+	if (update)
+		add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);
+
+	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
+		add_sta_cmd.station_flags_msk
+		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
+		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
+
+		add_sta_cmd.station_flags
+		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
+		/* Translate the peer's A-MPDU density requirement. */
+		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
+		case IEEE80211_AMPDU_PARAM_SS_2:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
+			break;
+		case IEEE80211_AMPDU_PARAM_SS_4:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
+			break;
+		case IEEE80211_AMPDU_PARAM_SS_8:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
+			break;
+		case IEEE80211_AMPDU_PARAM_SS_16:
+			add_sta_cmd.station_flags
+			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
+			break;
+		default:
+			break;
+		}
+	}
+
+	status = IWX_ADD_STA_SUCCESS;
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
+	    &add_sta_cmd, &status);
+	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+		err = EIO;
+
+	return err;
+}
+
+/*
+ * Add the auxiliary station used for firmware activity which is not
+ * tied to our BSS (e.g. scanning), and enable its Tx queue.
+ * Returns 0 on success, EIO if firmware rejected the command, or an
+ * error from the command submission or queue setup.
+ */
+int
+iwx_add_aux_sta(struct iwx_softc *sc)
+{
+	struct iwx_add_sta_cmd cmd;
+	int err, qid = IWX_DQA_AUX_QUEUE;
+	uint32_t status;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sta_id = IWX_AUX_STA_ID;
+	cmd.station_type = IWX_STA_AUX_ACTIVITY;
+	cmd.mac_id_n_color =
+	    htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
+	/* Tx is initially disabled on all TIDs. */
+	cmd.tid_disable_tx = htole16(0xffff);
+
+	status = IWX_ADD_STA_SUCCESS;
+	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
+	    &status);
+	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+		return EIO;
+
+	return iwx_enable_txq(sc, IWX_AUX_STA_ID, qid, IWX_MGMT_TID,
+	    IWX_TX_RING_COUNT);
+}
+
+/*
+ * Remove our peer's station entry from firmware.
+ * Must only be called while the station is marked active.
+ */
+int
+iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
+{
+	struct iwx_rm_sta_cmd cmd;
+
+	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
+		panic("sta already removed");
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sta_id = IWX_STATION_ID;
+
+	return iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Fill the UMAC scan command's channel configuration array with all
+ * channels currently enabled in net80211's channel table, up to the
+ * firmware's advertised scan channel limit.
+ * Returns the number of channel entries written.
+ */
+uint8_t
+iwx_umac_scan_fill_channels(struct iwx_softc *sc,
+    struct iwx_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_channel *c;
+	uint8_t nchan;
+
+	/* Channel index 0 is unused; start at ic_channels[1]. */
+	for (nchan = 0, c = &ic->ic_channels[1];
+	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
+	    nchan < sc->sc_capa_n_scan_channels;
+	    c++) {
+		/* Skip channels which are not configured for use. */
+		if (c->ic_flags == 0)
+			continue;
+
+		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
+		chan->iter_count = 1;
+		chan->iter_interval = htole16(0);
+		if (n_ssids != 0 && !bgscan)
+			chan->flags = htole32(1 << 0); /* select SSID 0 */
+		chan++;
+		nchan++;
+	}
+
+	return nchan;
+}
+
+/*
+ * Build a probe request in the old (v1) firmware layout by first
+ * building a current-format probe request and then copying its
+ * fields over, member by member.
+ */
+int
+iwx_fill_probe_req_v1(struct iwx_softc *sc, struct iwx_scan_probe_req_v1 *preq1)
+{
+	struct iwx_scan_probe_req preq;
+	int ret, band;
+
+	ret = iwx_fill_probe_req(sc, &preq);
+	if (ret != 0)
+		return ret;
+
+	preq1->mac_header = preq.mac_header;
+	for (band = 0; band < nitems(preq1->band_data); band++)
+		preq1->band_data[band] = preq.band_data[band];
+	preq1->common_data = preq.common_data;
+	memcpy(preq1->buf, preq.buf, sizeof(preq1->buf));
+	return 0;
+}
+
+/*
+ * Build the probe request frame template which firmware transmits
+ * during active scans, and record the offsets/lengths of the MAC
+ * header, per-band IEs, and common IEs so firmware can patch the
+ * frame per channel. The 'remain' counter tracks free space left in
+ * preq->buf; every addition is checked against it first.
+ * Returns 0 on success or ENOBUFS if the template buffer is too small.
+ */
+int
+iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = IC2IFP(ic);
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
+	struct ieee80211_rateset *rs;
+	size_t remain = sizeof(preq->buf);
+	uint8_t *frm, *pos;
+
+	memset(preq, 0, sizeof(*preq));
+
+	/* Ensure the header and an SSID element (2 + esslen) will fit. */
+	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
+		return ENOBUFS;
+
+	/*
+	 * Build a probe request frame. Most of the following code is a
+	 * copy & paste of what is done in net80211.
+	 */
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
+	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
+	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
+	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
+	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
+	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
+	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
+
+	frm = (uint8_t *)(wh + 1);
+	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
+
+	/* Tell the firmware where the MAC header is. */
+	preq->mac_header.offset = 0;
+	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
+	remain -= frm - (uint8_t *)wh;
+
+	/* Fill in 2GHz IEs and tell firmware where they are. */
+	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
+	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+		/* Rates + extended rates: two element headers needed. */
+		if (remain < 4 + rs->rs_nrates)
+			return ENOBUFS;
+	} else if (remain < 2 + rs->rs_nrates)
+		return ENOBUFS;
+	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
+	pos = frm;
+	frm = ieee80211_add_rates(frm, rs);
+	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+		frm = ieee80211_add_xrates(frm, rs);
+	preq->band_data[0].len = htole16(frm - pos);
+	remain -= frm - pos;
+
+	if (isset(sc->sc_enabled_capa,
+	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
+		if (remain < 3)
+			return ENOBUFS;
+		/* DS Parameter Set element; channel filled by firmware. */
+		*frm++ = IEEE80211_ELEMID_DSPARMS;
+		*frm++ = 1;
+		*frm++ = 0;
+		remain -= 3;
+	}
+
+	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
+		/* Fill in 5GHz IEs. */
+		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
+		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+			if (remain < 4 + rs->rs_nrates)
+				return ENOBUFS;
+		} else if (remain < 2 + rs->rs_nrates)
+			return ENOBUFS;
+		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
+		pos = frm;
+		frm = ieee80211_add_rates(frm, rs);
+		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+			frm = ieee80211_add_xrates(frm, rs);
+		preq->band_data[1].len = htole16(frm - pos);
+		remain -= frm - pos;
+	}
+
+	/* Send 11n IEs on both 2GHz and 5GHz bands. */
+	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
+	pos = frm;
+	if (ic->ic_flags & IEEE80211_F_HTON) {
+		if (remain < 28)
+			return ENOBUFS;
+		frm = ieee80211_add_htcaps(frm, ic);
+		/* XXX add WME info? */
+	}
+	preq->common_data.len = htole16(frm - pos);
+
+	return 0;
+}
+
+/*
+ * Send the one-time UMAC scan configuration to firmware: supported
+ * legacy rates, Tx/Rx chains, dwell times, our MAC address, and the
+ * list of scannable channels. The command buffer is variable-sized
+ * (one byte per supported scan channel appended to the struct).
+ * Returns 0 on success or an error from allocation/command submission.
+ */
+int
+iwx_config_umac_scan(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_scan_config *scan_config;
+	int err, nchan;
+	size_t cmd_size;
+	struct ieee80211_channel *c;
+	struct iwx_host_cmd hcmd = {
+		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
+		.flags = 0,
+	};
+	static const uint32_t rates = (IWX_SCAN_CONFIG_RATE_1M |
+	    IWX_SCAN_CONFIG_RATE_2M | IWX_SCAN_CONFIG_RATE_5M |
+	    IWX_SCAN_CONFIG_RATE_11M | IWX_SCAN_CONFIG_RATE_6M |
+	    IWX_SCAN_CONFIG_RATE_9M | IWX_SCAN_CONFIG_RATE_12M |
+	    IWX_SCAN_CONFIG_RATE_18M | IWX_SCAN_CONFIG_RATE_24M |
+	    IWX_SCAN_CONFIG_RATE_36M | IWX_SCAN_CONFIG_RATE_48M |
+	    IWX_SCAN_CONFIG_RATE_54M);
+
+	/* One channel number byte per supported scan channel. */
+	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
+
+	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
+	if (scan_config == NULL)
+		return ENOMEM;
+
+	scan_config->tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
+	scan_config->rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
+	scan_config->legacy_rates = htole32(rates |
+	    IWX_SCAN_CONFIG_SUPPORTED_RATE(rates));
+
+	/* These timings correspond to iwlwifi's UNASSOC scan. */
+	scan_config->dwell.active = 10;
+	scan_config->dwell.passive = 110;
+	scan_config->dwell.fragmented = 44;
+	scan_config->dwell.extended = 90;
+	scan_config->out_of_channel_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
+	scan_config->out_of_channel_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
+	scan_config->suspend_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
+	scan_config->suspend_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
+
+	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
+
+	scan_config->bcast_sta_id = IWX_AUX_STA_ID;
+	scan_config->channel_flags = 0;
+
+	/* Channel index 0 is unused; start at ic_channels[1]. */
+	for (c = &ic->ic_channels[1], nchan = 0;
+	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
+	    nchan < sc->sc_capa_n_scan_channels; c++) {
+		if (c->ic_flags == 0)
+			continue;
+		scan_config->channel_array[nchan++] =
+		    ieee80211_mhz2ieee(c->ic_freq, 0);
+	}
+
+	scan_config->flags = htole32(IWX_SCAN_CONFIG_FLAG_ACTIVATE |
+	    IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+	    IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+	    IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+	    IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+	    IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+	    IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+	    IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+	    IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
+	    IWX_SCAN_CONFIG_N_CHANNELS(nchan) |
+	    IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+	hcmd.data[0] = scan_config;
+	hcmd.len[0] = cmd_size;
+
+	err = iwx_send_cmd(sc, &hcmd);
+	free(scan_config, M_DEVBUF, cmd_size);
+	return err;
+}
+
+/*
+ * Compute the size of the UMAC scan request command for this device's
+ * firmware: a version-dependent header, one channel config entry per
+ * supported scan channel, and a version-dependent tail.
+ */
+int
+iwx_umac_scan_size(struct iwx_softc *sc)
+{
+	int head_size, tail_size;
+
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
+		head_size = IWX_SCAN_REQ_UMAC_SIZE_V8;
+	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
+		head_size = IWX_SCAN_REQ_UMAC_SIZE_V7;
+#ifdef notyet
+	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
+		head_size = IWX_SCAN_REQ_UMAC_SIZE_V6;
+#endif
+	else
+		head_size = IWX_SCAN_REQ_UMAC_SIZE_V1;
+
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
+		tail_size = sizeof(struct iwx_scan_req_umac_tail_v2);
+	else
+		tail_size = sizeof(struct iwx_scan_req_umac_tail_v1);
+
+	return head_size + sizeof(struct iwx_scan_channel_cfg_umac) *
+	    sc->sc_capa_n_scan_channels + tail_size;
+}
+
+/*
+ * Return a pointer to the channel parameter block within the scan
+ * request, whose location depends on the firmware's request layout.
+ */
+struct iwx_scan_umac_chan_param *
+iwx_get_scan_req_umac_chan_param(struct iwx_softc *sc,
+    struct iwx_scan_req_umac *req)
+{
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
+		return &req->v8.channel;
+	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
+		return &req->v7.channel;
+#ifdef notyet
+	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
+		return &req->v6.channel;
+#endif
+	else
+		return &req->v1.channel;
+}
+
+/*
+ * Return a pointer to the variable-length data area of the scan
+ * request, whose location depends on the firmware's request layout.
+ */
+void *
+iwx_get_scan_req_umac_data(struct iwx_softc *sc, struct iwx_scan_req_umac *req)
+{
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
+		return (void *)&req->v8.data;
+	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
+		return (void *)&req->v7.data;
+#ifdef notyet
+	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
+		return (void *)&req->v6.data;
+#endif
+	else
+		return (void *)&req->v1.data;
+}
+
+/* Adaptive dwell parameters used by iwx_umac_scan() below. */
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default high band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+/* adaptive dwell default low band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+/*
+ * Start a UMAC scan. If 'bgscan' is set this is a background scan
+ * while associated, performed asynchronously and with out-of-channel
+ * timeouts so we keep serving our BSS; otherwise it is a foreground
+ * scan. The request layout (v1/v7/v8) and tail layout (v1/v2) depend
+ * on firmware API flags, so several fields are set via version-specific
+ * unions. Returns 0 on success or an error code.
+ */
+int
+iwx_umac_scan(struct iwx_softc *sc, int bgscan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_host_cmd hcmd = {
+		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
+		.len = { 0, },
+		.data = { NULL, },
+		.flags = 0,
+	};
+	struct iwx_scan_req_umac *req;
+	void *cmd_data, *tail_data;
+	struct iwx_scan_req_umac_tail_v2 *tail;
+	struct iwx_scan_req_umac_tail_v1 *tailv1;
+	struct iwx_scan_umac_chan_param *chanparam;
+	size_t req_len;
+	int err, async = bgscan;
+
+	/* Sanity-check the computed request size before allocating. */
+	req_len = iwx_umac_scan_size(sc);
+	if ((req_len < IWX_SCAN_REQ_UMAC_SIZE_V1 +
+	    sizeof(struct iwx_scan_req_umac_tail_v1)) ||
+	    req_len > IWX_MAX_CMD_PAYLOAD_SIZE)
+		return ERANGE;
+	/* Background scans must not sleep waiting for memory. */
+	req = malloc(req_len, M_DEVBUF,
+	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
+	if (req == NULL)
+		return ENOMEM;
+
+	hcmd.len[0] = (uint16_t)req_len;
+	hcmd.data[0] = (void *)req;
+	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
+
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
+		req->v7.adwell_default_n_aps_social =
+			IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+		req->v7.adwell_default_n_aps =
+			IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
+
+		/* Directed scans get a smaller adaptive dwell budget. */
+		if (ic->ic_des_esslen != 0)
+			req->v7.adwell_max_budget =
+			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+		else
+			req->v7.adwell_max_budget =
+			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+		req->v7.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
+		req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = 0;
+		req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = 0;
+
+		if (isset(sc->sc_ucode_api,
+		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
+			req->v8.active_dwell[IWX_SCAN_LB_LMAC_IDX] = 10;
+			req->v8.passive_dwell[IWX_SCAN_LB_LMAC_IDX] = 110;
+		} else {
+			req->v7.active_dwell = 10;
+			req->v7.passive_dwell = 110;
+			req->v7.fragmented_dwell = 44;
+		}
+	} else {
+		/* These timings correspond to iwlwifi's UNASSOC scan. */
+		req->v1.active_dwell = 10;
+		req->v1.passive_dwell = 110;
+		req->v1.fragmented_dwell = 44;
+		req->v1.extended_dwell = 90;
+	}
+
+	if (bgscan) {
+		/* Limit time away from our BSS channel while associated. */
+		const uint32_t timeout = htole32(120);
+		if (isset(sc->sc_ucode_api,
+		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
+			req->v8.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
+			req->v8.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
+		} else if (isset(sc->sc_ucode_api,
+		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
+			req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
+			req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
+		} else {
+			req->v1.max_out_time = timeout;
+			req->v1.suspend_time = timeout;
+		}
+	}
+
+	req->v1.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
+	req->ooc_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
+
+	/* Locate the version-dependent data/channel areas of the request. */
+	cmd_data = iwx_get_scan_req_umac_data(sc, req);
+	chanparam = iwx_get_scan_req_umac_chan_param(sc, req);
+	chanparam->count = iwx_umac_scan_fill_channels(sc,
+	    (struct iwx_scan_channel_cfg_umac *)cmd_data,
+	    ic->ic_des_esslen != 0, bgscan);
+	chanparam->flags = 0;
+
+	tail_data = cmd_data + sizeof(struct iwx_scan_channel_cfg_umac) *
+	    sc->sc_capa_n_scan_channels;
+	tail = tail_data;
+	/* tail v1 layout differs in preq and direct_scan member fields. */
+	tailv1 = tail_data;
+
+	req->general_flags = htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
+	    IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
+		req->v8.general_flags2 =
+			IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
+	}
+
+#if 0 /* XXX Active scan causes firmware errors after association. */
+	/* Check if we're doing an active directed scan. */
+	if (ic->ic_des_esslen != 0) {
+		if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
+			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
+			tail->direct_scan[0].len = ic->ic_des_esslen;
+			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
+			    ic->ic_des_esslen);
+		} else {
+			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
+			tailv1->direct_scan[0].len = ic->ic_des_esslen;
+			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
+			    ic->ic_des_esslen);
+		}
+		req->general_flags |=
+		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
+	} else
+#endif
+		req->general_flags |= htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE);
+
+	if (isset(sc->sc_enabled_capa,
+	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+		req->general_flags |=
+		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
+
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
+		req->general_flags |=
+		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
+	} else {
+		req->general_flags |=
+		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
+	}
+
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
+		err = iwx_fill_probe_req(sc, &tail->preq);
+	else
+		err = iwx_fill_probe_req_v1(sc, &tailv1->preq);
+	if (err) {
+		free(req, M_DEVBUF, req_len);
+		return err;
+	}
+
+	/* Specify the scan plan: We'll do one iteration. */
+	tail->schedule[0].interval = 0;
+	tail->schedule[0].iter_count = 1;
+
+	err = iwx_send_cmd(sc, &hcmd);
+	free(req, M_DEVBUF, req_len);
+	return err;
+}
+
+/*
+ * Look up the rateset entry matching the driver rate index 'ridx'.
+ * Returns the rate byte from the rateset (including its flag bits,
+ * e.g. IEEE80211_RATE_BASIC), or 0 if the rate is not in the set.
+ */
+uint8_t
+iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
+{
+	int n;
+
+	for (n = 0; n < rs->rs_nrates; n++) {
+		if ((rs->rs_rates[n] & IEEE80211_RATE_VAL) ==
+		    iwx_rates[ridx].rate)
+			return rs->rs_rates[n];
+	}
+
+	return 0;
+}
+
+/*
+ * Map a net80211 rate value to the corresponding index in the
+ * iwx_rates table. Returns nitems(iwx_rates) if no entry matches.
+ */
+int
+iwx_rval2ridx(int rval)
+{
+	int i;
+
+	for (i = 0; i < nitems(iwx_rates); i++) {
+		if (iwx_rates[i].plcp != IWX_RATE_INVM_PLCP &&
+		    iwx_rates[i].rate == rval)
+			break;
+	}
+
+	return i;
+}
+
+/*
+ * Compute the CCK and OFDM ACK rate bitmaps for the MAC context
+ * command, derived from the node's basic rate set plus all lower
+ * mandatory rates as required by 802.11-2007 9.6 (see below).
+ * CCK bits are indexed from IWX_FIRST_CCK_RATE, OFDM bits from
+ * IWX_FIRST_OFDM_RATE. Results are returned in host byte order.
+ */
+void
+iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
+    int *ofdm_rates)
+{
+	struct ieee80211_node *ni = &in->in_ni;
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	int lowest_present_ofdm = -1;
+	int lowest_present_cck = -1;
+	uint8_t cck = 0;
+	uint8_t ofdm = 0;
+	int i;
+
+	/* CCK rates only apply on 2GHz channels. */
+	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
+	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
+		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
+			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+				continue;
+			cck |= (1 << i);
+			if (lowest_present_cck == -1 || lowest_present_cck > i)
+				lowest_present_cck = i;
+		}
+	}
+	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
+		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+			continue;
+		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
+		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
+			lowest_present_ofdm = i;
+	}
+
+	/*
+	 * Now we've got the basic rates as bitmaps in the ofdm and cck
+	 * variables. This isn't sufficient though, as there might not
+	 * be all the right rates in the bitmap. E.g. if the only basic
+	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+	 *
+	 * [...] a STA responding to a received frame shall transmit
+	 * its Control Response frame [...] at the highest rate in the
+	 * BSSBasicRateSet parameter that is less than or equal to the
+	 * rate of the immediately previous frame in the frame exchange
+	 * sequence ([...]) and that is of the same modulation class
+	 * ([...]) as the received frame. If no rate contained in the
+	 * BSSBasicRateSet parameter meets these conditions, then the
+	 * control frame sent in response to a received frame shall be
+	 * transmitted at the highest mandatory rate of the PHY that is
+	 * less than or equal to the rate of the received frame, and
+	 * that is of the same modulation class as the received frame.
+	 *
+	 * As a consequence, we need to add all mandatory rates that are
+	 * lower than all of the basic rates to these bitmaps.
+	 */
+
+	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
+		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
+	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
+		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
+	/* 6M already there or needed so always add */
+	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
+
+	/*
+	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+	 * Note, however:
+	 *  - if no CCK rates are basic, it must be ERP since there must
+	 *    be some basic rates at all, so they're OFDM => ERP PHY
+	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
+	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+	 *  - if 5.5M is basic, 1M and 2M are mandatory
+	 *  - if 2M is basic, 1M is mandatory
+	 *  - if 1M is basic, that's the only valid ACK rate.
+	 * As a consequence, it's not as complicated as it sounds, just add
+	 * any lower rates to the ACK rate bitmap.
+	 */
+	if (IWX_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
+	if (IWX_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
+	if (IWX_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
+	/* 1M already there or needed so always add */
+	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
+
+	*cck_rates = cck;
+	*ofdm_rates = ofdm;
+}
+
+/*
+ * Fill in the parts of the MAC context command which are common to
+ * all actions (add/modify/remove): id/color, MAC type, addresses,
+ * ACK rates, slot/preamble flags, EDCA parameters and HT protection.
+ * Only monitor and station modes are supported; anything else panics.
+ */
+void
+iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
+    struct iwx_mac_ctx_cmd *cmd, uint32_t action)
+{
+#define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = ic->ic_bss;
+	int cck_ack_rates, ofdm_ack_rates;
+	int i;
+
+	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+	    in->in_color));
+	cmd->action = htole32(action);
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR)
+		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
+	else if (ic->ic_opmode == IEEE80211_M_STA)
+		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
+	else
+		panic("unsupported operating mode %d\n", ic->ic_opmode);
+	cmd->tsf_id = htole32(IWX_TSF_ID_A);
+
+	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
+	/* Monitor mode needs no BSS-specific configuration below. */
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
+		return;
+	}
+
+	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
+	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
+	cmd->cck_rates = htole32(cck_ack_rates);
+	cmd->ofdm_rates = htole32(ofdm_ack_rates);
+
+	cmd->cck_short_preamble
+	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
+	cmd->short_slot
+	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
+	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
+
+	/* Translate net80211 EDCA parameters per access category. */
+	for (i = 0; i < EDCA_NUM_AC; i++) {
+		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
+		int txf = iwx_ac_to_tx_fifo[i];
+
+		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
+		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
+		cmd->ac[txf].aifsn = ac->ac_aifsn;
+		cmd->ac[txf].fifos_mask = (1 << txf);
+		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
+	}
+	if (ni->ni_flags & IEEE80211_NODE_QOS)
+		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
+
+	if (ni->ni_flags & IEEE80211_NODE_HT) {
+		enum ieee80211_htprot htprot =
+		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
+		switch (htprot) {
+		case IEEE80211_HTPROT_NONE:
+			break;
+		case IEEE80211_HTPROT_NONMEMBER:
+		case IEEE80211_HTPROT_NONHT_MIXED:
+			cmd->protection_flags |=
+			    htole32(IWX_MAC_PROT_FLG_HT_PROT);
+			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+				cmd->protection_flags |=
+				    htole32(IWX_MAC_PROT_FLG_SELF_CTS_EN);
+			break;
+		case IEEE80211_HTPROT_20MHZ:
+			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
+				/* XXX ... and if our channel is 40 MHz ... */
+				cmd->protection_flags |=
+				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
+				    IWX_MAC_PROT_FLG_FAT_PROT);
+				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+					cmd->protection_flags |= htole32(
+					    IWX_MAC_PROT_FLG_SELF_CTS_EN);
+			}
+			break;
+		default:
+			break;
+		}
+
+		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
+	}
+	if (ic->ic_flags & IEEE80211_F_USEPROT)
+		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
+
+	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
+#undef IWX_EXP2
+}
+
+/*
+ * Fill the station-specific portion of the MAC context command with
+ * association state and beacon/DTIM timing derived from the node's
+ * last received beacon (timestamp, interval, DTIM period/count).
+ */
+void
+iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
+    struct iwx_mac_data_sta *sta, int assoc)
+{
+	struct ieee80211_node *ni = &in->in_ni;
+	uint32_t dtim_off;
+	uint64_t tsf;
+
+	/* Offset from the last beacon to the next DTIM beacon, in usec. */
+	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
+	/* ni_tstamp holds the beacon's TSF in little-endian byte order. */
+	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
+	tsf = letoh64(tsf);
+
+	sta->is_assoc = htole32(assoc);
+	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
+	sta->dtim_tsf = htole64(tsf + dtim_off);
+	sta->bi = htole32(ni->ni_intval);
+	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
+	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
+	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
+	sta->listen_interval = htole32(10);
+	sta->assoc_id = htole32(ni->ni_associd);
+	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
+}
+
+/*
+ * Send a MAC context command to firmware with the given action
+ * (add/modify/remove). Panics on add-while-active or
+ * remove-while-inactive, which would indicate a driver state bug.
+ * Returns 0 on success or an error from the command submission.
+ */
+int
+iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
+    int assoc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = &in->in_ni;
+	struct iwx_mac_ctx_cmd cmd;
+	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
+
+	if (action == IWX_FW_CTXT_ACTION_ADD && active)
+		panic("MAC already added");
+	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
+		panic("MAC already removed");
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		/* Monitor mode: accept as many frame types as possible. */
+		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
+		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
+		    IWX_MAC_FILTER_IN_BEACON |
+		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
+		    IWX_MAC_FILTER_IN_CRC32);
+	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
+		/*
+		 * Allow beacons to pass through as long as we are not
+		 * associated or we do not have dtim period information.
+		 */
+		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
+	else
+		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
+
+	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Ask firmware to clear its statistics counters. The firmware sends
+ * the (old) statistics back in its response, which we discard.
+ */
+int
+iwx_clear_statistics(struct iwx_softc *sc)
+{
+	struct iwx_statistics_cmd scmd = {
+		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
+	};
+	struct iwx_host_cmd cmd = {
+		.id = IWX_STATISTICS_CMD,
+		.len[0] = sizeof(scmd),
+		.data[0] = &scmd,
+		.flags = IWX_CMD_WANT_RESP,
+		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
+	};
+	int ret;
+
+	ret = iwx_send_cmd(sc, &cmd);
+	if (ret != 0)
+		return ret;
+
+	/* We only wanted the counters cleared; drop the response. */
+	iwx_free_resp(sc, &cmd);
+	return 0;
+}
+
+/*
+ * Update firmware time quotas: divide the IWX_MAX_QUOTA scheduling
+ * fragments equally among all active bindings (currently at most one,
+ * since PHY ID == binding ID and we use a single PHY context).
+ * 'running' determines whether the node's binding counts as active.
+ * Returns 0 on success or an error from the command submission.
+ */
+int
+iwx_update_quotas(struct iwx_softc *sc, struct iwx_node *in, int running)
+{
+	struct iwx_time_quota_cmd cmd;
+	int i, idx, num_active_macs, quota, quota_rem;
+	int colors[IWX_MAX_BINDINGS] = { -1, -1, -1, -1, };
+	int n_ifs[IWX_MAX_BINDINGS] = {0, };
+	uint16_t id;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	/* currently, PHY ID == binding ID */
+	if (in && in->in_phyctxt) {
+		id = in->in_phyctxt->id;
+		KASSERT(id < IWX_MAX_BINDINGS);
+		colors[id] = in->in_phyctxt->color;
+		if (running)
+			n_ifs[id] = 1;
+	}
+
+	/*
+	 * The FW's scheduling session consists of
+	 * IWX_MAX_QUOTA fragments. Divide these fragments
+	 * equally between all the bindings that require quota
+	 */
+	num_active_macs = 0;
+	for (i = 0; i < IWX_MAX_BINDINGS; i++) {
+		cmd.quotas[i].id_and_color = htole32(IWX_FW_CTXT_INVALID);
+		num_active_macs += n_ifs[i];
+	}
+
+	quota = 0;
+	quota_rem = 0;
+	if (num_active_macs) {
+		quota = IWX_MAX_QUOTA / num_active_macs;
+		quota_rem = IWX_MAX_QUOTA % num_active_macs;
+	}
+
+	/* Fill in quota entries for all bindings which have a color. */
+	for (idx = 0, i = 0; i < IWX_MAX_BINDINGS; i++) {
+		if (colors[i] < 0)
+			continue;
+
+		cmd.quotas[idx].id_and_color =
+		    htole32(IWX_FW_CMD_ID_AND_COLOR(i, colors[i]));
+
+		if (n_ifs[i] <= 0) {
+			cmd.quotas[idx].quota = htole32(0);
+			cmd.quotas[idx].max_duration = htole32(0);
+		} else {
+			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
+			cmd.quotas[idx].max_duration = htole32(0);
+		}
+		idx++;
+	}
+
+	/* Give the remainder of the session to the first binding */
+	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
+
+	return iwx_send_cmd_pdu(sc, IWX_TIME_QUOTA_CMD, 0,
+	    sizeof(cmd), &cmd);
+}
+
+/*
+ * Queue a task on the given taskq while holding a reference on the
+ * softc, unless the driver is shutting down. The reference is dropped
+ * again if the task was already queued.
+ */
+void
+iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
+{
+	int s;
+
+	s = splnet();
+	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
+		refcnt_take(&sc->task_refs);
+		if (task_add(taskq, task) == 0)
+			refcnt_rele_wake(&sc->task_refs);
+	}
+	splx(s);
+}
+
+/*
+ * Cancel a pending task and, if it was indeed still queued, drop the
+ * softc reference which was taken when it was added.
+ */
+void
+iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
+{
+	if (task_del(taskq, task) != 0)
+		refcnt_rele(&sc->task_refs);
+}
+
+/*
+ * Start a foreground scan, aborting any background scan in progress
+ * first, and move net80211 into SCAN state. Returns 0 on success or
+ * an error code if the scan could not be started.
+ */
+int
+iwx_scan(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = IC2IFP(ic);
+	int err;
+
+	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
+		err = iwx_scan_abort(sc);
+		if (err) {
+			printf("%s: could not abort background scan\n",
+			    DEVNAME(sc));
+			return err;
+		}
+	}
+
+	err = iwx_umac_scan(sc, 0);
+	if (err) {
+		printf("%s: could not initiate scan\n", DEVNAME(sc));
+		return err;
+	}
+
+	/*
+	 * The current mode might have been fixed during association.
+	 * Ensure all channels get scanned.
+	 */
+	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
+		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
+
+	sc->sc_flags |= IWX_FLAG_SCANNING;
+	if (ifp->if_flags & IFF_DEBUG)
+		printf("%s: %s -> %s\n", ifp->if_xname,
+		    ieee80211_state_name[ic->ic_state],
+		    ieee80211_state_name[IEEE80211_S_SCAN]);
+	/* Background scans keep the link and BSS node intact. */
+	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
+		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
+		ieee80211_node_cleanup(ic, ic->ic_bss);
+	}
+	ic->ic_state = IEEE80211_S_SCAN;
+	wakeup(&ic->ic_state); /* wake iwx_init() */
+
+	return 0;
+}
+
+/*
+ * Start a background scan while associated, unless a scan is already
+ * in progress. Returns 0 on success or if already scanning.
+ */
+int
+iwx_bgscan(struct ieee80211com *ic)
+{
+	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
+	int ret;
+
+	if (sc->sc_flags & IWX_FLAG_SCANNING)
+		return 0;
+
+	ret = iwx_umac_scan(sc, 1);
+	if (ret != 0) {
+		printf("%s: could not initiate scan\n", DEVNAME(sc));
+		return ret;
+	}
+
+	sc->sc_flags |= IWX_FLAG_BGSCAN;
+	return 0;
+}
+
+/*
+ * Send a UMAC scan abort command to firmware.
+ */
+int
+iwx_umac_scan_abort(struct iwx_softc *sc)
+{
+	struct iwx_umac_scan_abort cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	return iwx_send_cmd_pdu(sc,
+	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
+	    0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Abort any scan in progress and, on success, clear the driver's
+ * scanning state flags.
+ */
+int
+iwx_scan_abort(struct iwx_softc *sc)
+{
+	int ret;
+
+	ret = iwx_umac_scan_abort(sc);
+	if (ret != 0)
+		return ret;
+
+	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
+	return 0;
+}
+
+/*
+ * Enable one Tx queue per EDCA access category for our station.
+ * Returns 0 on success or the error from the first failing queue.
+ */
+int
+iwx_enable_data_tx_queues(struct iwx_softc *sc)
+{
+	int ret, ac, qid;
+
+	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
+		/*
+		 * Regular data frames use the "MGMT" TID and queue.
+		 * Other TIDs and queues are reserved for frame aggregation.
+		 */
+		qid = ac + IWX_DQA_AUX_QUEUE + 1;
+		ret = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_MGMT_TID,
+		    IWX_TX_RING_COUNT);
+		if (ret != 0) {
+			printf("%s: could not enable Tx queue %d (error %d)\n",
+			    DEVNAME(sc), ac, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare firmware state for authentication: configure the PHY
+ * context for the target channel, then add the MAC context, binding,
+ * and station, enable data Tx queues, and protect the session with a
+ * time event so firmware stays on channel during association.
+ * On failure, previously added firmware state is torn down again via
+ * the goto-cleanup chain -- but only if no device reset happened in
+ * the meantime (checked via the generation counter).
+ */
+int
+iwx_auth(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	uint32_t duration;
+	int generation = sc->sc_generation, err;
+
+	splassert(IPL_NET);
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR)
+		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
+	else
+		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
+	err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
+	    IWX_FW_CTXT_ACTION_MODIFY, 0);
+	if (err) {
+		printf("%s: could not update PHY context (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+	in->in_phyctxt = &sc->sc_phyctxt[0];
+
+	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
+	if (err) {
+		printf("%s: could not add MAC context (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
+
+	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
+	if (err) {
+		printf("%s: could not add binding (error %d)\n",
+		    DEVNAME(sc), err);
+		goto rm_mac_ctxt;
+	}
+	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
+
+	err = iwx_add_sta_cmd(sc, in, 0);
+	if (err) {
+		printf("%s: could not add sta (error %d)\n",
+		    DEVNAME(sc), err);
+		goto rm_binding;
+	}
+	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
+
+	/* In monitor mode there is no association to protect. */
+	if (ic->ic_opmode == IEEE80211_M_MONITOR)
+		return 0;
+
+	err = iwx_enable_data_tx_queues(sc);
+	if (err)
+		goto rm_sta;
+
+	err = iwx_clear_statistics(sc);
+	if (err)
+		goto rm_sta;
+
+	/*
+	 * Prevent the FW from wandering off channel during association
+	 * by "protecting" the session with a time event.
+	 */
+	if (in->in_ni.ni_intval)
+		duration = in->in_ni.ni_intval * 2;
+	else
+		duration = IEEE80211_DUR_TU;
+	iwx_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
+
+	return 0;
+
+rm_sta:
+	if (generation == sc->sc_generation) {
+		iwx_rm_sta_cmd(sc, in);
+		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+	}
+rm_binding:
+	if (generation == sc->sc_generation) {
+		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+	}
+rm_mac_ctxt:
+	if (generation == sc->sc_generation) {
+		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+	}
+	return err;
+}
+
+/*
+ * Undo iwx_auth(): stop session protection, remove the station,
+ * flush pending Tx, and remove the binding and MAC context, clearing
+ * the corresponding state flags as each step succeeds.
+ * Returns 0 on success or the error from the first failing step.
+ */
+int
+iwx_deauth(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int err;
+
+	splassert(IPL_NET);
+
+	iwx_unprotect_session(sc, in);
+
+	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+		err = iwx_rm_sta_cmd(sc, in);
+		if (err) {
+			printf("%s: could not remove STA (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+	}
+
+	err = iwx_flush_tx_path(sc);
+	if (err) {
+		printf("%s: could not flush Tx path (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
+		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+		if (err) {
+			printf("%s: could not remove binding (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+	}
+
+	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
+		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+		if (err) {
+			printf("%s: could not remove MAC context (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+	}
+
+	return 0;
+}
+
+/*
+ * Move to ASSOC state: add the firmware station for our BSS, or
+ * update it in place if it already exists (e.g. when re-associating).
+ * Data Tx queues are only enabled when the station is newly added.
+ */
+int
+iwx_assoc(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int update_sta = (sc->sc_flags & IWX_FLAG_STA_ACTIVE);
+	int err;
+
+	splassert(IPL_NET);
+
+	err = iwx_add_sta_cmd(sc, in, update_sta);
+	if (err) {
+		printf("%s: could not %s STA (error %d)\n",
+		    DEVNAME(sc), update_sta ? "update" : "add", err);
+		return err;
+	}
+
+	if (!update_sta)
+		err = iwx_enable_data_tx_queues(sc);
+
+	return err;
+}
+
+/*
+ * Leave ASSOC state by removing the firmware station, if one is
+ * active.  The binding and MAC contexts stay in place; they are
+ * removed later by iwx_deauth() if the state machine descends further.
+ */
+int
+iwx_disassoc(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int err;
+
+	splassert(IPL_NET);
+
+	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+		err = iwx_rm_sta_cmd(sc, in);
+		if (err) {
+			printf("%s: could not remove STA (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+	}
+
+	return 0;
+}
+
+/*
+ * Enter RUN state: enable MIMO Rx chains where supported, push the
+ * associd assigned by the AP into the MAC context, and turn on smart
+ * fidelity, multicast filtering, power management and quotas.
+ * In monitor mode this also performs the iwx_auth() step (MAC context
+ * plus a sniffing station) which the state machine would otherwise
+ * have done during authentication.
+ */
+int
+iwx_run(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int err;
+
+	splassert(IPL_NET);
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		/* Add a MAC context and a sniffing STA. */
+		err = iwx_auth(sc);
+		if (err)
+			return err;
+	}
+
+	/* Configure Rx chains for MIMO. */
+	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
+	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
+	    !sc->sc_nvm.sku_cap_mimo_disable) {
+		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
+		    2, 2, IWX_FW_CTXT_ACTION_MODIFY, 0);
+		if (err) {
+			printf("%s: failed to update PHY\n",
+			    DEVNAME(sc));
+			return err;
+		}
+	}
+
+	/* We have now been assigned an associd by the AP. */
+	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
+	if (err) {
+		printf("%s: failed to update MAC\n", DEVNAME(sc));
+		return err;
+	}
+
+	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
+	if (err) {
+		printf("%s: could not set sf full on (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	err = iwx_allow_mcast(sc);
+	if (err) {
+		printf("%s: could not allow mcast (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	err = iwx_power_update_device(sc);
+	if (err) {
+		printf("%s: could not send power command (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+#ifdef notyet
+	/*
+	 * Disabled for now. Default beacon filter settings
+	 * prevent net80211 from getting ERP and HT protection
+	 * updates from beacons.
+	 */
+	err = iwx_enable_beacon_filter(sc, in);
+	if (err) {
+		printf("%s: could not enable beacon filter\n",
+		    DEVNAME(sc));
+		return err;
+	}
+#endif
+	err = iwx_power_mac_update_mode(sc, in);
+	if (err) {
+		printf("%s: could not update MAC power (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+		err = iwx_update_quotas(sc, in, 1);
+		if (err) {
+			printf("%s: could not update quotas (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+	}
+
+	/* Tx rate is adapted in software by AMRR (legacy) and MiRa (HT). */
+	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
+	ieee80211_mira_node_init(&in->in_mn);
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR)
+		return 0;
+
+	/* Start at lowest available bit-rate, AMRR will raise. */
+	in->in_ni.ni_txrate = 0;
+	in->in_ni.ni_txmcs = 0;
+
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		DPRINTF(("%s: TODO: Enable firmware rate scaling?\n",
+		    DEVNAME(sc)));
+
+	/* Kick off the periodic rate-control timer. */
+	timeout_add_msec(&sc->sc_calib_to, 500);
+	return 0;
+}
+
+/*
+ * Leave RUN state: disable smart fidelity and beacon filtering,
+ * clear quotas, mark the MAC context as unassociated, and drop the
+ * Rx chain configuration back from MIMO to SISO if necessary.
+ */
+int
+iwx_run_stop(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int err;
+
+	splassert(IPL_NET);
+
+	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
+	if (err)
+		return err;
+
+	err = iwx_disable_beacon_filter(sc);
+	if (err) {
+		printf("%s: could not disable beacon filter (error %d)\n",
+		    DEVNAME(sc), err);
+		return err;
+	}
+
+	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+		err = iwx_update_quotas(sc, in, 0);
+		if (err) {
+			printf("%s: could not update quotas (error %d)\n",
+			    DEVNAME(sc), err);
+			return err;
+		}
+	}
+
+	/* Last argument 0: we are no longer associated. */
+	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
+	if (err) {
+		printf("%s: failed to update MAC\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* Reset Tx chains in case MIMO was enabled. */
+	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
+	    !sc->sc_nvm.sku_cap_mimo_disable) {
+		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
+		    IWX_FW_CTXT_ACTION_MODIFY, 0);
+		if (err) {
+			printf("%s: failed to update PHY\n", DEVNAME(sc));
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * net80211 node allocation hook.  struct iwx_node embeds
+ * struct ieee80211_node as its first member, so allocate the
+ * larger driver-private structure (zeroed; may return NULL).
+ */
+struct ieee80211_node *
+iwx_node_alloc(struct ieee80211com *ic)
+{
+	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
+}
+
+/*
+ * Periodic (500ms) software rate-control tick, armed by iwx_run().
+ * Runs AMRR rate selection for non-HT operation and re-enables OFDM
+ * rates once AMRR has moved off the forced-CCK workaround.
+ */
+void
+iwx_calib_timeout(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	struct ieee80211_node *ni = &in->in_ni;
+	int s;
+
+	s = splnet();
+	/* Only adapt when no fixed rate is set and we are a non-HT STA. */
+	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
+	    ((ni->ni_flags & IEEE80211_NODE_HT) == 0 || in->ht_force_cck) &&
+	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
+		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
+		if (in->ht_force_cck) {
+			struct ieee80211_rateset *rs = &ni->ni_rates;
+			uint8_t rv;
+			rv = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
+			/* Drop the CCK restriction once an OFDM rate is chosen. */
+			if (IWX_RVAL_IS_OFDM(rv))
+				in->ht_force_cck = 0;
+		}
+	}
+
+	splx(s);
+
+	/* Re-arm; cancelled by iwx_newstate()/iwx_stop(). */
+	timeout_add_msec(&sc->sc_calib_to, 500);
+}
+
+/*
+ * ifmedia change callback.  Translates a fixed 802.11 rate or MCS
+ * selected by the user into a hardware rate index, then restarts the
+ * interface if it is up so the new setting takes effect.
+ */
+int
+iwx_media_change(struct ifnet *ifp)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	uint8_t rate, ridx;
+	int err;
+
+	err = ieee80211_media_change(ifp);
+	if (err != ENETRESET)
+		return err;
+
+	if (ic->ic_fixed_mcs != -1)
+		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
+	else if (ic->ic_fixed_rate != -1) {
+		rate = ic->ic_sup_rates[ic->ic_curmode].
+		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
+		/* Map 802.11 rate to HW rate index. */
+		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
+			if (iwx_rates[ridx].rate == rate)
+				break;
+		/*
+		 * NOTE(review): if no table entry matches, ridx ends up
+		 * IWX_RIDX_MAX + 1 here -- presumably net80211 only offers
+		 * rates present in iwx_rates; verify.
+		 */
+		sc->sc_fixed_ridx = ridx;
+	}
+
+	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+	    (IFF_UP | IFF_RUNNING)) {
+		iwx_stop(ifp);
+		err = iwx_init(ifp);
+	}
+	return err;
+}
+
+/*
+ * Process-context worker for net80211 state transitions, scheduled by
+ * iwx_newstate().  Walks the state machine down (RUN -> ASSOC -> AUTH)
+ * before walking up to the requested state, since firmware contexts
+ * must be torn down in reverse order of creation.  Holds a task
+ * reference which iwx_stop() waits on; every exit path must release
+ * it via refcnt_rele_wake().
+ */
+void
+iwx_newstate_task(void *psc)
+{
+	struct iwx_softc *sc = (struct iwx_softc *)psc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	enum ieee80211_state nstate = sc->ns_nstate;
+	enum ieee80211_state ostate = ic->ic_state;
+	int arg = sc->ns_arg;
+	int err = 0, s = splnet();
+
+	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
+		/* iwx_stop() is waiting for us. */
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+	}
+
+	if (ostate == IEEE80211_S_SCAN) {
+		if (nstate == ostate) {
+			/* SCAN -> SCAN while firmware is still scanning: no-op. */
+			if (sc->sc_flags & IWX_FLAG_SCANNING) {
+				refcnt_rele_wake(&sc->task_refs);
+				splx(s);
+				return;
+			}
+			/* Firmware is no longer scanning. Do another scan. */
+			goto next_scan;
+		}
+	}
+
+	if (nstate <= ostate) {
+		switch (ostate) {
+		case IEEE80211_S_RUN:
+			err = iwx_run_stop(sc);
+			if (err)
+				goto out;
+			/* FALLTHROUGH */
+		case IEEE80211_S_ASSOC:
+			if (nstate <= IEEE80211_S_ASSOC) {
+				err = iwx_disassoc(sc);
+				if (err)
+					goto out;
+			}
+			/* FALLTHROUGH */
+		case IEEE80211_S_AUTH:
+			if (nstate <= IEEE80211_S_AUTH) {
+				err = iwx_deauth(sc);
+				if (err)
+					goto out;
+			}
+			/* FALLTHROUGH */
+		case IEEE80211_S_SCAN:
+		case IEEE80211_S_INIT:
+			break;
+		}
+
+		/* Die now if iwx_stop() was called while we were sleeping. */
+		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
+			refcnt_rele_wake(&sc->task_refs);
+			splx(s);
+			return;
+		}
+	}
+
+	switch (nstate) {
+	case IEEE80211_S_INIT:
+		break;
+
+	case IEEE80211_S_SCAN:
+next_scan:
+		err = iwx_scan(sc);
+		if (err)
+			break;
+		/* Stay in SCAN; net80211 state advances when scan ends. */
+		refcnt_rele_wake(&sc->task_refs);
+		splx(s);
+		return;
+
+	case IEEE80211_S_AUTH:
+		err = iwx_auth(sc);
+		break;
+
+	case IEEE80211_S_ASSOC:
+		err = iwx_assoc(sc);
+		break;
+
+	case IEEE80211_S_RUN:
+		err = iwx_run(sc);
+		break;
+	}
+
+out:
+	/* On error, schedule a full re-init instead of advancing state. */
+	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
+		if (err)
+			task_add(systq, &sc->init_task);
+		else
+			sc->sc_newstate(ic, nstate, arg);
+	}
+	refcnt_rele_wake(&sc->task_refs);
+	splx(s);
+}
+
+/*
+ * net80211 state-change hook.  Firmware commands may sleep, so the
+ * actual transition is deferred to iwx_newstate_task(); this function
+ * only records the target state and cancels timers/tasks that must
+ * not outlive RUN state.
+ */
+int
+iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
+{
+	struct ifnet *ifp = IC2IFP(ic);
+	struct iwx_softc *sc = ifp->if_softc;
+	struct iwx_node *in = (void *)ic->ic_bss;
+
+	if (ic->ic_state == IEEE80211_S_RUN) {
+		timeout_del(&sc->sc_calib_to);
+		ieee80211_mira_cancel_timeouts(&in->in_mn);
+		iwx_del_task(sc, systq, &sc->ba_task);
+		iwx_del_task(sc, systq, &sc->htprot_task);
+	}
+
+	sc->ns_nstate = nstate;
+	sc->ns_arg = arg;
+
+	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
+
+	return 0;
+}
+
+/*
+ * Finish a scan: clear the scanning flags and let net80211 pick a
+ * BSS (or schedule the next scan).  Harmless no-op if no scan was
+ * in progress.
+ */
+void
+iwx_endscan(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
+		return;
+
+	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
+	ieee80211_end_scan(&ic->ic_if);
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration.  Values are pre-swapped to the
+ * little-endian byte order the firmware expects.
+ */
+static const uint32_t
+iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+	{
+		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+	},
+	{
+		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
+		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
+	},
+	{
+		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
+		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
+	},
+	{
+		htole32(IWX_SF_BA_AGING_TIMER_DEF),
+		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
+	},
+	{
+		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
+		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
+	},
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.  Used while associated
+ * (see iwx_fill_sf_command()).
+ */
+static const uint32_t
+iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+	{
+		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
+		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
+	},
+	{
+		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
+		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
+	},
+	{
+		htole32(IWX_SF_MCAST_AGING_TIMER),
+		htole32(IWX_SF_MCAST_IDLE_TIMER)
+	},
+	{
+		htole32(IWX_SF_BA_AGING_TIMER),
+		htole32(IWX_SF_BA_IDLE_TIMER)
+	},
+	{
+		htole32(IWX_SF_TX_RE_AGING_TIMER),
+		htole32(IWX_SF_TX_RE_IDLE_TIMER)
+	},
+};
+
+/*
+ * Populate a smart-fidelity configuration command.  With a node
+ * pointer ('ni' != NULL, i.e. associated) the watermark and timeouts
+ * are chosen from the AP's antenna capabilities; without one the
+ * default (unassociated) values are used.
+ */
+void
+iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
+    struct ieee80211_node *ni)
+{
+	int i, j, watermark;
+
+	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
+
+	/*
+	 * If we are in association flow - check antenna configuration
+	 * capabilities of the AP station, and choose the watermark accordingly.
+	 */
+	if (ni) {
+		if (ni->ni_flags & IEEE80211_NODE_HT) {
+			/* A non-zero second MCS row implies 2 spatial streams. */
+			if (ni->ni_rxmcs[1] != 0)
+				watermark = IWX_SF_W_MARK_MIMO2;
+			else
+				watermark = IWX_SF_W_MARK_SISO;
+		} else {
+			watermark = IWX_SF_W_MARK_LEGACY;
+		}
+	/* default watermark value for unassociated mode. */
+	} else {
+		watermark = IWX_SF_W_MARK_MIMO2;
+	}
+	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
+
+	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
+		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
+			sf_cmd->long_delay_timeouts[i][j] =
+					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
+		}
+	}
+
+	if (ni) {
+		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
+		       sizeof(iwx_sf_full_timeout));
+	} else {
+		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
+		       sizeof(iwx_sf_full_timeout_def));
+	}
+
+}
+
+/*
+ * Send a smart-fidelity state change to the firmware.  FULL_ON uses
+ * the current BSS node's capabilities; UNINIT/INIT_OFF use defaults.
+ * Sent asynchronously; returns EINVAL for unknown states.
+ */
+int
+iwx_sf_config(struct iwx_softc *sc, int new_state)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_sf_cfg_cmd sf_cmd = {
+		.state = htole32(new_state),
+	};
+	int err = 0;
+
+	switch (new_state) {
+	case IWX_SF_UNINIT:
+	case IWX_SF_INIT_OFF:
+		iwx_fill_sf_command(sc, &sf_cmd, NULL);
+		break;
+	case IWX_SF_FULL_ON:
+		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
+		break;
+	default:
+		return EINVAL;
+	}
+
+	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
+	    sizeof(sf_cmd), &sf_cmd);
+	return err;
+}
+
+/*
+ * Configure bluetooth coexistence in wifi-priority mode.
+ * The command struct lives on the stack and is handed to the
+ * firmware verbatim, so zero it first; otherwise any padding or
+ * additional fields would carry uninitialized stack bytes.
+ */
+int
+iwx_send_bt_init_conf(struct iwx_softc *sc)
+{
+	struct iwx_bt_coex_cmd bt_cmd;
+
+	memset(&bt_cmd, 0, sizeof(bt_cmd));
+	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
+	bt_cmd.enabled_modules = htole32(IWX_BT_COEX_HIGH_BAND_RET);
+
+	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
+	    &bt_cmd);
+}
+
+/*
+ * Send an MCC (mobile country code) update for regulatory-domain
+ * handling; 'alpha2' is a two-letter country code ("ZZ" = world).
+ * A response is requested but its contents are discarded; we only
+ * care that the command succeeded.
+ */
+int
+iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
+{
+	struct iwx_mcc_update_cmd mcc_cmd;
+	struct iwx_host_cmd hcmd = {
+		.id = IWX_MCC_UPDATE_CMD,
+		.flags = IWX_CMD_WANT_RESP,
+		.data = { &mcc_cmd },
+	};
+	int err;
+
+	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
+	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
+	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
+		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
+	else
+		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
+
+	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
+	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) +
+	    sizeof(struct iwx_mcc_update_resp);
+
+	err = iwx_send_cmd(sc, &hcmd);
+	if (err)
+		return err;
+
+	/* Response buffer must be freed even though we ignore it. */
+	iwx_free_resp(sc, &hcmd);
+
+	return 0;
+}
+
+/*
+ * Bring the device to an operational state: run the INIT firmware
+ * image, restart the hardware, load the runtime image, and configure
+ * antennas, bluetooth coexistence, stations, PHY contexts, power and
+ * scanning.  Called from iwx_init() with the ioctl lock held.
+ *
+ * Error handling: once iwx_nic_lock() has succeeded, every failure
+ * path must 'goto err' so the NIC lock is released before returning;
+ * failures before that point return directly.
+ */
+int
+iwx_init_hw(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	int err, i;
+
+	err = iwx_preinit(sc);
+	if (err)
+		return err;
+
+	err = iwx_start_hw(sc);
+	if (err) {
+		printf("%s: could not initialize hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	err = iwx_run_init_mvm_ucode(sc, 0);
+	if (err)
+		return err;
+
+	/* Should stop and start HW since INIT image just loaded. */
+	iwx_stop_device(sc);
+	err = iwx_start_hw(sc);
+	if (err) {
+		printf("%s: could not initialize hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	err = iwx_load_ucode_wait_alive(sc);
+	if (err) {
+		printf("%s: could not load firmware\n", DEVNAME(sc));
+		/* NIC lock is not held yet; do not go through 'err:'. */
+		return err;
+	}
+
+	if (!iwx_nic_lock(sc))
+		return EBUSY;
+
+	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
+	if (err) {
+		printf("%s: could not init tx ant config (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	if (sc->sc_tx_with_siso_diversity) {
+		err = iwx_send_phy_cfg_cmd(sc);
+		if (err) {
+			printf("%s: could not send phy config (error %d)\n",
+			    DEVNAME(sc), err);
+			goto err;
+		}
+	}
+
+	err = iwx_send_bt_init_conf(sc);
+	if (err) {
+		printf("%s: could not init bt coex (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;	/* release NIC lock (was: return err) */
+	}
+
+	err = iwx_send_dqa_cmd(sc);
+	if (err)
+		goto err;	/* release NIC lock (was: return err) */
+
+	/* Add auxiliary station for scanning */
+	err = iwx_add_aux_sta(sc);
+	if (err) {
+		printf("%s: could not add aux station (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	for (i = 0; i < 1; i++) {
+		/*
+		 * The channel used here isn't relevant as it's
+		 * going to be overwritten in the other flows.
+		 * For now use the first channel we have.
+		 */
+		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
+		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
+		    IWX_FW_CTXT_ACTION_ADD, 0);
+		if (err) {
+			printf("%s: could not add phy context %d (error %d)\n",
+			    DEVNAME(sc), i, err);
+			goto err;
+		}
+	}
+
+	/* LTR failure is not fatal; log and continue. */
+	err = iwx_config_ltr(sc);
+	if (err) {
+		printf("%s: PCIe LTR configuration failed (error %d)\n",
+		    DEVNAME(sc), err);
+	}
+
+	err = iwx_power_update_device(sc);
+	if (err) {
+		printf("%s: could not send power command (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
+		err = iwx_send_update_mcc_cmd(sc, "ZZ");
+		if (err) {
+			printf("%s: could not init LAR (error %d)\n",
+			    DEVNAME(sc), err);
+			goto err;
+		}
+	}
+
+	err = iwx_config_umac_scan(sc);
+	if (err) {
+		printf("%s: could not configure scan (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+	err = iwx_disable_beacon_filter(sc);
+	if (err) {
+		printf("%s: could not disable beacon filter (error %d)\n",
+		    DEVNAME(sc), err);
+		goto err;
+	}
+
+err:
+	/* Success also falls through here; err is 0 in that case. */
+	iwx_nic_unlock(sc);
+	return err;
+}
+
+/*
+ * Allow multicast from our BSSID.  Installs a pass-all multicast
+ * filter bound to the current BSSID.  The command buffer is heap
+ * allocated because the firmware expects a 4-byte-aligned size.
+ * Returns ENOMEM if allocation fails, else the command status.
+ */
+int
+iwx_allow_mcast(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni = ic->ic_bss;
+	struct iwx_mcast_filter_cmd *cmd;
+	size_t size;
+	int err;
+
+	size = roundup(sizeof(*cmd), 4);
+	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (cmd == NULL)
+		return ENOMEM;
+	cmd->filter_own = 1;
+	cmd->port_id = 0;
+	cmd->count = 0;
+	cmd->pass_all = 1;
+	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
+
+	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
+	    0, size, cmd);
+	free(cmd, M_DEVBUF, size);
+	return err;
+}
+
+/*
+ * Interface up: initialize the hardware and either go straight to
+ * RUN (monitor mode) or begin scanning.  Bumps sc_sc_generation so
+ * stale tasks from a previous incarnation can detect they are dead.
+ * Sleeps until the SCAN state transition completes, returning ENXIO
+ * if the interface was torn down while we slept.
+ */
+int
+iwx_init(struct ifnet *ifp)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int err, generation;
+
+	rw_assert_wrlock(&sc->ioctl_rwl);
+
+	generation = ++sc->sc_generation;
+
+	KASSERT(sc->task_refs.refs == 0);
+	refcnt_init(&sc->task_refs);
+
+	err = iwx_init_hw(sc);
+	if (err) {
+		if (generation == sc->sc_generation)
+			iwx_stop(ifp);
+		return err;
+	}
+
+	ifq_clr_oactive(&ifp->if_snd);
+	ifp->if_flags |= IFF_RUNNING;
+
+	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
+		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
+		return 0;
+	}
+
+	ieee80211_begin_scan(ifp);
+
+	/*
+	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
+	 * Wait until the transition to SCAN state has completed.
+	 */
+	do {
+		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
+		    SEC_TO_NSEC(1));
+		if (generation != sc->sc_generation)
+			return ENXIO;
+		if (err)
+			return err;
+	} while (ic->ic_state != IEEE80211_S_SCAN);
+
+	return 0;
+}
+
+/*
+ * ifnet start routine: drain management frames and, when in RUN
+ * state, data frames from the send queue, encapsulate them and hand
+ * them to iwx_tx().  Backs off via ifq_set_oactive() while any Tx
+ * ring is full (qfullmsk).  Called at IPL_NET.
+ */
+void
+iwx_start(struct ifnet *ifp)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_node *ni;
+	struct ether_header *eh;
+	struct mbuf *m;
+	int ac = EDCA_AC_BE; /* XXX */
+
+	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
+		return;
+
+	for (;;) {
+		/* why isn't this done per-queue? */
+		if (sc->qfullmsk != 0) {
+			ifq_set_oactive(&ifp->if_snd);
+			break;
+		}
+
+		/* need to send management frames even if we're not RUNning */
+		m = mq_dequeue(&ic->ic_mgtq);
+		if (m) {
+			/* ph_cookie carries the destination node. */
+			ni = m->m_pkthdr.ph_cookie;
+			goto sendit;
+		}
+
+		if (ic->ic_state != IEEE80211_S_RUN ||
+		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
+			break;
+
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+		if (!m)
+			break;
+		if (m->m_len < sizeof (*eh) &&
+		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
+			ifp->if_oerrors++;
+			continue;
+		}
+#if NBPFILTER > 0
+		if (ifp->if_bpf != NULL)
+			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
+			ifp->if_oerrors++;
+			continue;
+		}
+
+ sendit:
+#if NBPFILTER > 0
+		if (ic->ic_rawbpf != NULL)
+			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
+#endif
+		if (iwx_tx(sc, m, ni, ac) != 0) {
+			ieee80211_release_node(ic, ni);
+			ifp->if_oerrors++;
+			continue;
+		}
+
+		/* Arm the Tx watchdog (see iwx_watchdog()). */
+		if (ifp->if_flags & IFF_UP) {
+			sc->sc_tx_timer = 15;
+			ifp->if_timer = 1;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Interface down: block new tasks, cancel pending ones, wait for
+ * in-flight tasks to drain (refcnt_finalize), power down the device
+ * and reset all driver soft state.  Called with the ioctl lock held.
+ */
+void
+iwx_stop(struct ifnet *ifp)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct iwx_node *in = (void *)ic->ic_bss;
+	int i, s = splnet();
+
+	rw_assert_wrlock(&sc->ioctl_rwl);
+
+	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
+
+	/* Cancel scheduled tasks and let any stale tasks finish up. */
+	task_del(systq, &sc->init_task);
+	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
+	iwx_del_task(sc, systq, &sc->ba_task);
+	iwx_del_task(sc, systq, &sc->htprot_task);
+	KASSERT(sc->task_refs.refs >= 1);
+	refcnt_finalize(&sc->task_refs, "iwxstop");
+
+	iwx_stop_device(sc);
+
+	/* Reset soft state. */
+
+	/* Invalidate any synchronous-command responses still pending. */
+	sc->sc_generation++;
+	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
+		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
+		sc->sc_cmd_resp_pkt[i] = NULL;
+		sc->sc_cmd_resp_len[i] = 0;
+	}
+	ifp->if_flags &= ~IFF_RUNNING;
+	ifq_clr_oactive(&ifp->if_snd);
+
+	in->in_phyctxt = NULL;
+	if (ic->ic_state == IEEE80211_S_RUN)
+		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */
+
+	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
+	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
+	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
+
+	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
+
+	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
+	ifp->if_timer = sc->sc_tx_timer = 0;
+
+	splx(s);
+}
+
+/*
+ * ifnet watchdog: decrement the Tx timer armed by iwx_start(); if it
+ * hits zero the device has stopped responding, so dump the firmware
+ * error log (debug builds) and schedule a full re-init.
+ */
+void
+iwx_watchdog(struct ifnet *ifp)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+
+	ifp->if_timer = 0;
+	if (sc->sc_tx_timer > 0) {
+		if (--sc->sc_tx_timer == 0) {
+			printf("%s: device timeout\n", DEVNAME(sc));
+#ifdef IWX_DEBUG
+			iwx_nic_error(sc);
+#endif
+			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
+				task_add(systq, &sc->init_task);
+			ifp->if_oerrors++;
+			return;
+		}
+		ifp->if_timer = 1;
+	}
+
+	ieee80211_watchdog(ifp);
+}
+
+/*
+ * ifnet ioctl handler.  Serialized via ioctl_rwl because iwx_init()
+ * sleeps; if the device was re-initialized (generation changed) while
+ * we waited for the lock, bail out with ENXIO.  ENETRESET from
+ * net80211 means a setting changed that requires a restart.
+ */
+int
+iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct iwx_softc *sc = ifp->if_softc;
+	int s, err = 0, generation = sc->sc_generation;
+
+	/*
+	 * Prevent processes from entering this function while another
+	 * process is tsleep'ing in it.
+	 */
+	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
+	if (err == 0 && generation != sc->sc_generation) {
+		rw_exit(&sc->ioctl_rwl);
+		return ENXIO;
+	}
+	if (err)
+		return err;
+	s = splnet();
+
+	switch (cmd) {
+	case SIOCSIFADDR:
+		ifp->if_flags |= IFF_UP;
+		/* FALLTHROUGH */
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			if (!(ifp->if_flags & IFF_RUNNING)) {
+				err = iwx_init(ifp);
+			}
+		} else {
+			if (ifp->if_flags & IFF_RUNNING)
+				iwx_stop(ifp);
+		}
+		break;
+
+	default:
+		err = ieee80211_ioctl(ifp, cmd, data);
+	}
+
+	if (err == ENETRESET) {
+		err = 0;
+		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+		    (IFF_UP | IFF_RUNNING)) {
+			iwx_stop(ifp);
+			err = iwx_init(ifp);
+		}
+	}
+
+	splx(s);
+	rw_exit(&sc->ioctl_rwl);
+
+	return err;
+}
+
+#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
+/*
+ * LMAC error log, read from device memory by iwx_nic_error().
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with uint32_t-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwx_error_event_table {
+	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
+	uint32_t error_id;		/* type of error */
+	uint32_t trm_hw_status0;	/* TRM HW status */
+	uint32_t trm_hw_status1;	/* TRM HW status */
+	uint32_t blink2;		/* branch link */
+	uint32_t ilink1;		/* interrupt link */
+	uint32_t ilink2;		/* interrupt link */
+	uint32_t data1;		/* error-specific data */
+	uint32_t data2;		/* error-specific data */
+	uint32_t data3;		/* error-specific data */
+	uint32_t bcon_time;		/* beacon timer */
+	uint32_t tsf_low;		/* network timestamp function timer */
+	uint32_t tsf_hi;		/* network timestamp function timer */
+	uint32_t gp1;		/* GP1 timer register */
+	uint32_t gp2;		/* GP2 timer register */
+	uint32_t fw_rev_type;	/* firmware revision type */
+	uint32_t major;		/* uCode version major */
+	uint32_t minor;		/* uCode version minor */
+	uint32_t hw_ver;		/* HW Silicon version */
+	uint32_t brd_ver;		/* HW board version */
+	uint32_t log_pc;		/* log program counter */
+	uint32_t frame_ptr;		/* frame pointer */
+	uint32_t stack_ptr;		/* stack pointer */
+	uint32_t hcmd;		/* last host command header */
+	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
+				 * rxtx_flag */
+	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
+				 * host_flag */
+	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
+				 * enc_flag */
+	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
+				 * time_flag */
+	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
+				 * wico interrupt */
+	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
+	uint32_t wait_event;		/* wait event() caller address */
+	uint32_t l2p_control;	/* L2pControlField */
+	uint32_t l2p_duration;	/* L2pDurationField */
+	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
+	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
+	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
+				 * (LMPM_PMG_SEL) */
+	uint32_t u_timestamp;	/* indicate when the date and time of the
+				 * compilation */
+	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Read from device memory by iwx_nic_umac_error().
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwx_umac_error_event_table {
+	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
+	uint32_t error_id;	/* type of error */
+	uint32_t blink1;	/* branch link */
+	uint32_t blink2;	/* branch link */
+	uint32_t ilink1;	/* interrupt link */
+	uint32_t ilink2;	/* interrupt link */
+	uint32_t data1;		/* error-specific data */
+	uint32_t data2;		/* error-specific data */
+	uint32_t data3;		/* error-specific data */
+	uint32_t umac_major;
+	uint32_t umac_minor;
+	uint32_t frame_pointer;	/* core register 27 */
+	uint32_t stack_pointer;	/* core register 28 */
+	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
+	uint32_t nic_isr_pref;	/* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
+#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
+
+/*
+ * Dump the UMAC firmware error log to the console.  The log lives in
+ * device memory at the address the firmware advertised in its alive
+ * response (uc_umac_error_event_table).
+ */
+void
+iwx_nic_umac_error(struct iwx_softc *sc)
+{
+	struct iwx_umac_error_event_table table;
+	uint32_t base;
+
+	base = sc->sc_uc.uc_umac_error_event_table;
+
+	/* Error tables live in upper device memory; low values are bogus. */
+	if (base < 0x800000) {
+		printf("%s: Invalid error log pointer 0x%08x\n",
+		    DEVNAME(sc), base);
+		return;
+	}
+
+	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+		printf("%s: reading errlog failed\n", DEVNAME(sc));
+		return;
+	}
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
+		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+			sc->sc_flags, table.valid);
+	}
+
+	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
+		iwx_desc_lookup(table.error_id));
+	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
+	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
+	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
+	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
+	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
+	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
+	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
+	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
+	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
+	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
+	    table.frame_pointer);
+	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
+	    table.stack_pointer);
+	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
+	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
+	    table.nic_isr_pref);
+}
+
+#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
+/*
+ * Firmware error-id to name mapping used by iwx_desc_lookup().
+ * The final "ADVANCED_SYSASSERT" entry (num 0) is the catch-all
+ * for codes not listed here.
+ */
+static struct {
+	const char *name;
+	uint8_t num;
+} advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "BAD_COMMAND", 0x39 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
+
+/*
+ * Translate a firmware error id into a human-readable name.
+ * The CPU bits in the upper nibble are masked off before the lookup.
+ */
+const char *
+iwx_desc_lookup(uint32_t num)
+{
+	uint32_t id = num & ~IWX_FW_SYSASSERT_CPU_MASK;
+	int i;
+
+	/*
+	 * Scan all entries except the last one; the final entry,
+	 * "ADVANCED_SYSASSERT", is the catch-all for unknown codes
+	 * and is returned when the loop runs off the end.
+	 */
+	for (i = 0; i < nitems(advanced_lookup) - 1; i++) {
+		if (advanced_lookup[i].num == id)
+			break;
+	}
+
+	return advanced_lookup[i].name;
+}
+
+/*
+ * Dump the LMAC firmware error log to the console, followed by the
+ * UMAC log if the firmware advertised one.
+ *
+ * Support for dumping the error log seemed like a good idea ...
+ * but it's mostly hex junk and the only sensible thing is the
+ * hw/ucode revision (which we know anyway). Since it's here,
+ * I'll just leave it in, just in case e.g. the Intel guys want to
+ * help us decipher some "ADVANCED_SYSASSERT" later.
+ */
+void
+iwx_nic_error(struct iwx_softc *sc)
+{
+	struct iwx_error_event_table table;
+	uint32_t base;
+
+	printf("%s: dumping device error log\n", DEVNAME(sc));
+	base = sc->sc_uc.uc_lmac_error_event_table[0];
+	/* Error tables live in upper device memory; low values are bogus. */
+	if (base < 0x800000) {
+		printf("%s: Invalid error log pointer 0x%08x\n",
+		    DEVNAME(sc), base);
+		return;
+	}
+
+	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+		printf("%s: reading errlog failed\n", DEVNAME(sc));
+		return;
+	}
+
+	if (!table.valid) {
+		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
+		return;
+	}
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
+		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+		    sc->sc_flags, table.valid);
+	}
+
+	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
+	    iwx_desc_lookup(table.error_id));
+	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
+	    table.trm_hw_status0);
+	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
+	    table.trm_hw_status1);
+	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
+	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
+	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
+	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
+	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
+	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
+	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
+	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
+	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
+	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
+	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
+	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
+	    table.fw_rev_type);
+	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
+	    table.major);
+	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
+	    table.minor);
+	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
+	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
+	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
+	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
+	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
+	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
+	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
+	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
+	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
+	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
+	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
+	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
+	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
+	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
+	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
+	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
+	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
+
+	if (sc->sc_uc.uc_umac_error_event_table)
+		iwx_nic_umac_error(sc);
+}
+#endif
+
+/*
+ * Sync a DMA-mapped Rx buffer past the packet header and return a
+ * pointer to the response payload following struct iwx_rx_packet.
+ * Both macros assume a local 'data' (struct iwx_rx_data *) in scope.
+ */
+#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
+do {									\
+	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
+	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
+	_var_ = (void *)((_pkt_)+1);					\
+} while (/*CONSTCOND*/0)
+
+/*
+ * NOTE(review): the _len_ parameter is never used; 'sizeof(len)'
+ * below refers to a variable named 'len' in the caller's scope and
+ * syncs only sizeof(len) bytes, not 'len' bytes.  Presumably the
+ * intent was '(_len_)' -- verify against callers before changing.
+ */
+#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
+do {									\
+	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
+	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
+	_ptr_ = (void *)((_pkt_)+1);					\
+} while (/*CONSTCOND*/0)
+
+/*
+ * Decide whether an RX packet header describes a real packet.
+ * Returns nonzero if the packet looks valid, zero otherwise.
+ */
+int
+iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
+{
+	/* A frame explicitly marked invalid by firmware is not valid. */
+	if (pkt->len_n_flags == htole32(IWX_FH_RSCSR_FRAME_INVALID))
+		return 0;
+
+	/*
+	 * An all-zero header (ignoring the firmware-originated bit in
+	 * the queue id) means there is no packet here.
+	 */
+	if ((pkt->hdr.qid & ~0x80) == 0 && pkt->hdr.idx == 0 &&
+	    IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code) == 0)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Process one RX buffer, which may contain several concatenated
+ * firmware notifications and/or received frames.  Frames destined for
+ * the network stack are queued on 'ml'; the caller passes them up in
+ * one batch.
+ */
+void
+iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
+{
+	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
+	struct iwx_rx_packet *pkt, *nextpkt;
+	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
+	struct mbuf *m0, *m;
+	/* Smallest possible packet: length word plus packet header. */
+	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
+	size_t remain = IWX_RBUF_SIZE;
+	int qid, idx, code, handled = 1;
+
+	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
+	    BUS_DMASYNC_POSTREAD);
+
+	/* Walk all packets packed into this buffer. */
+	m0 = data->m;
+	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
+		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
+		qid = pkt->hdr.qid;
+		idx = pkt->hdr.idx;
+
+		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
+
+		if (!iwx_rx_pkt_valid(pkt))
+			break;
+
+		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
+		if (len < sizeof(pkt->hdr) ||
+		    len > (IWX_RBUF_SIZE - offset - minsz))
+			break;
+
+		/*
+		 * Once the first MPDU is seen, replace this buffer on the
+		 * RX ring so that we can hand m0 (or copies of its
+		 * sub-packets) to the network stack.
+		 */
+		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
+			/* Take mbuf m0 off the RX ring. */
+			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
+				ifp->if_ierrors++;
+				break;
+			}
+			KASSERT(data->m != m0);
+		}
+
+		switch (code) {
+		case IWX_REPLY_RX_PHY_CMD:
+			iwx_rx_rx_phy_cmd(sc, pkt, data);
+			break;
+
+		case IWX_REPLY_RX_MPDU_CMD: {
+			size_t maxlen = remain - minsz;
+			nextoff = offset +
+			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+			nextpkt = (struct iwx_rx_packet *)
+			    (m0->m_data + nextoff);
+			if (nextoff + minsz >= IWX_RBUF_SIZE ||
+			    !iwx_rx_pkt_valid(nextpkt)) {
+				/* No need to copy last frame in buffer. */
+				if (offset > 0)
+					m_adj(m0, offset);
+				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
+				m0 = NULL; /* stack owns m0 now; abort loop */
+			} else {
+				/*
+				 * Create an mbuf which points to the current
+				 * packet. Always copy from offset zero to
+				 * preserve m_pkthdr.
+				 */
+				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
+				if (m == NULL) {
+					ifp->if_ierrors++;
+					break;
+				}
+				m_adj(m, offset);
+				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
+			}
+
+			/* Shrink the length bound used for later MPDUs. */
+			if (offset + minsz < remain)
+				remain -= offset;
+			else
+				remain = minsz;
+ 			break;
+		}
+
+		case IWX_TX_CMD:
+			iwx_rx_tx_cmd(sc, pkt, data);
+			break;
+
+		case IWX_MISSED_BEACONS_NOTIFICATION:
+			iwx_rx_bmiss(sc, pkt, data);
+			break;
+
+		case IWX_MFUART_LOAD_NOTIFICATION:
+			break;
+
+		case IWX_ALIVE: {
+			struct iwx_alive_resp_v4 *resp4;
+
+			DPRINTF(("%s: firmware alive\n", __func__));
+			if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
+				SYNC_RESP_STRUCT(resp4, pkt);
+				/*
+				 * Stash firmware error/log table pointers;
+				 * used by the error dump code.
+				 */
+				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+				sc->sc_uc.uc_log_event_table = le32toh(
+				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+				sc->sched_base = le32toh(
+				    resp4->lmac_data[0].dbg_ptrs.scd_base_ptr);
+				sc->sc_uc.uc_umac_error_event_table = le32toh(
+				    resp4->umac_data.dbg_ptrs.error_info_addr);
+				if (resp4->status == IWX_ALIVE_STATUS_OK)
+					sc->sc_uc.uc_ok = 1;
+				else
+					sc->sc_uc.uc_ok = 0;
+			}
+
+			/* Wake up whoever is waiting for the "alive" event. */
+			sc->sc_uc.uc_intr = 1;
+			wakeup(&sc->sc_uc);
+			break;
+		}
+
+		case IWX_STATISTICS_NOTIFICATION: {
+			struct iwx_notif_statistics *stats;
+			SYNC_RESP_STRUCT(stats, pkt);
+			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
+			sc->sc_noise = iwx_get_noise(&stats->rx.general);
+			break;
+		}
+
+		case IWX_DTS_MEASUREMENT_NOTIFICATION:
+		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
+			break;
+
+		/*
+		 * Responses to driver-issued commands: if a response
+		 * buffer was set up for this command slot, copy the
+		 * whole packet into it for the waiting caller.
+		 */
+		case IWX_PHY_CONFIGURATION_CMD:
+		case IWX_TX_ANT_CONFIGURATION_CMD:
+		case IWX_ADD_STA:
+		case IWX_MAC_CONTEXT_CMD:
+		case IWX_REPLY_SF_CFG_CMD:
+		case IWX_POWER_TABLE_CMD:
+		case IWX_LTR_CONFIG:
+		case IWX_PHY_CONTEXT_CMD:
+		case IWX_BINDING_CONTEXT_CMD:
+		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
+		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
+		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
+		case IWX_REPLY_BEACON_FILTERING_CMD:
+		case IWX_MAC_PM_POWER_TABLE:
+		case IWX_TIME_QUOTA_CMD:
+		case IWX_REMOVE_STA:
+		case IWX_TXPATH_FLUSH:
+		case IWX_BT_CONFIG:
+		case IWX_NVM_ACCESS_CMD:
+		case IWX_MCC_UPDATE_CMD:
+		case IWX_TIME_EVENT_CMD:
+		case IWX_STATISTICS_CMD:
+		case IWX_SCD_QUEUE_CFG: {
+			size_t pkt_len;
+
+			if (sc->sc_cmd_resp_pkt[idx] == NULL)
+				break;
+
+			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
+			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
+
+			pkt_len = sizeof(pkt->len_n_flags) +
+			    iwx_rx_packet_len(pkt);
+
+			/* Drop failed or malformed responses. */
+			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
+			    pkt_len < sizeof(*pkt) ||
+			    pkt_len > sc->sc_cmd_resp_len[idx]) {
+				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
+				    sc->sc_cmd_resp_len[idx]);
+				sc->sc_cmd_resp_pkt[idx] = NULL;
+				break;
+			}
+
+			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
+			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
+			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
+			break;
+		}
+
+		case IWX_INIT_COMPLETE_NOTIF:
+			sc->sc_init_complete |= IWX_INIT_COMPLETE;
+			wakeup(&sc->sc_init_complete);
+			break;
+
+		case IWX_SCAN_COMPLETE_UMAC: {
+			struct iwx_umac_scan_complete *notif;
+			SYNC_RESP_STRUCT(notif, pkt);
+			iwx_endscan(sc);
+			break;
+		}
+
+		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
+			struct iwx_umac_scan_iter_complete_notif *notif;
+			SYNC_RESP_STRUCT(notif, pkt);
+			iwx_endscan(sc);
+			break;
+		}
+
+		case IWX_REPLY_ERROR: {
+			struct iwx_error_resp *resp;
+			SYNC_RESP_STRUCT(resp, pkt);
+			printf("%s: firmware error 0x%x, cmd 0x%x\n",
+				DEVNAME(sc), le32toh(resp->error_type),
+				resp->cmd_id);
+			break;
+		}
+
+		case IWX_TIME_EVENT_NOTIFICATION: {
+			struct iwx_time_event_notif *notif;
+			uint32_t action;
+			SYNC_RESP_STRUCT(notif, pkt);
+
+			/* Only track the time event we ourselves scheduled. */
+			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
+				break;
+			action = le32toh(notif->action);
+			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
+				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+			break;
+		}
+
+		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
+		    break;
+
+		/*
+		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
+		 * messages. Just ignore them for now.
+		 */
+		case IWX_DEBUG_LOG_MSG:
+			break;
+
+		case IWX_MCAST_FILTER_CMD:
+			break;
+
+		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
+			break;
+
+		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
+			break;
+
+		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+		    IWX_NVM_ACCESS_COMPLETE):
+			break;
+
+		default:
+			handled = 0;
+			printf("%s: unhandled firmware response 0x%x/0x%x "
+			    "rx ring %d[%d]\n",
+			    DEVNAME(sc), code, pkt->len_n_flags,
+			    (qid & ~0x80), idx);
+			break;
+		}
+
+		/*
+		 * uCode sets bit 0x80 when it originates the notification,
+		 * i.e. when the notification is not a direct response to a
+		 * command sent by the driver.
+		 * For example, uCode issues IWX_REPLY_RX when it sends a
+		 * received frame to the driver.
+		 */
+		if (handled && !(qid & (1 << 7))) {
+			iwx_cmd_done(sc, qid, idx, code);
+		}
+
+		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+	}
+
+	/* Free m0 unless ownership passed to the ring or the stack. */
+	if (m0 && m0 != data->m)
+		m_freem(m0);
+}
+
+/*
+ * Service the RX notification ring: process every buffer between our
+ * current read index and the index most recently closed by hardware.
+ */
+void
+iwx_notif_intr(struct iwx_softc *sc)
+{
+	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+	uint16_t hw;
+
+	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
+	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
+
+	/* Hardware write index, wrapped to the ring size. */
+	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
+	hw &= (IWX_RX_MQ_RING_COUNT - 1);
+	while (sc->rxq.cur != hw) {
+		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+		iwx_rx_pkt(sc, data, &ml);
+		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
+	}
+	/* Hand all collected frames to the network stack in one batch. */
+	if_input(&sc->sc_ic.ic_if, &ml);
+
+	/*
+	 * Tell the firmware what we have processed.
+	 * Seems like the hardware gets upset unless we align the write by 8??
+	 */
+	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
+	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
+}
+
+/*
+ * Legacy (INTx/MSI) interrupt handler.  Interrupt causes are gathered
+ * either from the in-memory ICT table or directly from the CSR_INT
+ * register, depending on IWX_FLAG_USE_ICT.  Returns nonzero if the
+ * interrupt was ours.
+ */
+int
+iwx_intr(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	int handled = 0;
+	int r1, r2, rv = 0;
+	int isperiodic = 0;
+
+	/* Mask all interrupts while servicing this one. */
+	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
+		uint32_t *ict = sc->ict_dma.vaddr;
+		int tmp;
+
+		tmp = htole32(ict[sc->ict_cur]);
+		if (!tmp)
+			goto out_ena;
+
+		/*
+		 * ok, there was something.  keep plowing until we have all.
+		 */
+		r1 = r2 = 0;
+		while (tmp) {
+			r1 |= tmp;
+			/* Clear each consumed ICT slot for reuse. */
+			ict[sc->ict_cur] = 0;
+			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
+			tmp = htole32(ict[sc->ict_cur]);
+		}
+
+		/* this is where the fun begins.  don't ask */
+		if (r1 == 0xffffffff)
+			r1 = 0;
+
+		/* i am not expected to understand this */
+		if (r1 & 0xc0000)
+			r1 |= 0x8000;
+		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
+	} else {
+		r1 = IWX_READ(sc, IWX_CSR_INT);
+		/* A register full of ones means the device is gone. */
+		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
+			goto out;
+		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
+	}
+	if (r1 == 0 && r2 == 0) {
+		goto out_ena;	/* not ours; re-enable and leave */
+	}
+
+	/* Acknowledge the causes we are about to handle. */
+	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
+
+	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
+		int i;
+
+		/* Firmware has now configured the RFH. */
+		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+			iwx_update_rx_desc(sc, &sc->rxq, i);
+		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+	}
+
+	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
+
+	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
+		handled |= IWX_CSR_INT_BIT_RF_KILL;
+		iwx_check_rfkill(sc);
+		task_add(systq, &sc->init_task);
+		rv = 1;
+		goto out_ena;
+	}
+
+	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
+#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
+		int i;
+
+		iwx_nic_error(sc);
+
+		/* Dump driver status (TX and RX rings) while we're here. */
+		printf("driver status:\n");
+		for (i = 0; i < IWX_MAX_QUEUES; i++) {
+			struct iwx_tx_ring *ring = &sc->txq[i];
+			printf("  tx ring %2d: qid=%-2d cur=%-3d "
+			    "queued=%-3d\n",
+			    i, ring->qid, ring->cur, ring->queued);
+		}
+		printf("  rx ring: cur=%d\n", sc->rxq.cur);
+		printf("  802.11 state %s\n",
+		    ieee80211_state_name[sc->sc_ic.ic_state]);
+#endif
+
+		printf("%s: fatal firmware error\n", DEVNAME(sc));
+		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
+			task_add(systq, &sc->init_task);
+		rv = 1;
+		goto out;
+
+	}
+
+	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
+		handled |= IWX_CSR_INT_BIT_HW_ERR;
+		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
+			sc->sc_flags |= IWX_FLAG_HW_ERR;
+			task_add(systq, &sc->init_task);
+		}
+		rv = 1;
+		goto out;
+	}
+
+	/* firmware chunk loaded */
+	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
+		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
+		handled |= IWX_CSR_INT_BIT_FH_TX;
+
+		sc->sc_fw_chunk_done = 1;
+		wakeup(&sc->sc_fw);
+	}
+
+	if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
+		handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
+		IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
+		if ((r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) == 0)
+			IWX_WRITE_1(sc,
+			    IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
+		isperiodic = 1;
+	}
+
+	if ((r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) ||
+	    isperiodic) {
+		handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
+		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
+
+		iwx_notif_intr(sc);
+
+		/* enable periodic interrupt, see above */
+		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX) &&
+		    !isperiodic)
+			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
+			    IWX_CSR_INT_PERIODIC_ENA);
+	}
+
+	rv = 1;
+
+ out_ena:
+	/* Restore the interrupt mask we cleared on entry. */
+	iwx_restore_interrupts(sc);
+ out:
+	return rv;
+}
+
+/*
+ * MSI-X interrupt handler.  Causes are split between an FH (DMA) and
+ * an HW cause register; both are read, acknowledged, and filtered
+ * against the masks programmed at MSI-X configuration time.
+ * Only vector 0 is used by this driver.
+ */
+int
+iwx_intr_msix(void *arg)
+{
+	struct iwx_softc *sc = arg;
+	uint32_t inta_fh, inta_hw;
+	int vector = 0;
+
+	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
+	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
+	/* Acknowledge by writing the causes back. */
+	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+	inta_fh &= sc->sc_fh_mask;
+	inta_hw &= sc->sc_hw_mask;
+
+	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
+	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
+		iwx_notif_intr(sc);
+	}
+
+	/* firmware chunk loaded */
+	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+		sc->sc_fw_chunk_done = 1;
+		wakeup(&sc->sc_fw);
+	}
+
+	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
+	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
+		int i;
+
+		iwx_nic_error(sc);
+
+		/* Dump driver status (TX and RX rings) while we're here. */
+		printf("driver status:\n");
+		for (i = 0; i < IWX_MAX_QUEUES; i++) {
+			struct iwx_tx_ring *ring = &sc->txq[i];
+			printf("  tx ring %2d: qid=%-2d cur=%-3d "
+			    "queued=%-3d\n",
+			    i, ring->qid, ring->cur, ring->queued);
+		}
+		printf("  rx ring: cur=%d\n", sc->rxq.cur);
+		printf("  802.11 state %s\n",
+		    ieee80211_state_name[sc->sc_ic.ic_state]);
+#endif
+
+		printf("%s: fatal firmware error\n", DEVNAME(sc));
+		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
+			task_add(systq, &sc->init_task);
+		return 1;
+	}
+
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+		iwx_check_rfkill(sc);
+		task_add(systq, &sc->init_task);
+	}
+
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
+			sc->sc_flags |= IWX_FLAG_HW_ERR;
+			task_add(systq, &sc->init_task);
+		}
+		return 1;
+	}
+
+	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
+		int i;
+
+		/* Firmware has now configured the RFH. */
+		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+			iwx_update_rx_desc(sc, &sc->rxq, i);
+		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+	}
+
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the corresponding
+	 * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. This register is defined as
+	 * write 1 clear (W1C) register, meaning that it's being clear
+	 * by writing 1 to the bit.
+	 */
+	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
+	return 1;
+}
+
+typedef void *iwx_match_t;
+
+/* PCI IDs of supported adapters (Intel AX200 family for now). */
+static const struct pci_matchid iwx_devices[] = {
+	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
+};
+
+/*
+ * autoconf(9) match function: claim any PCI device whose vendor and
+ * product ID appear in iwx_devices[].
+ */
+int
+iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
+{
+	return pci_matchbyid((struct pci_attach_args *)aux, iwx_devices,
+	    nitems(iwx_devices));
+}
+
+/*
+ * Firmware-dependent initialization, deferred until the firmware image
+ * can be loaded from the root file system.  On first success this
+ * prints version info, sets up rates/channels from NVM data, and
+ * configures the MAC address.  Subsequent calls only refresh the MAC
+ * address.  Returns 0 on success or an errno-style value.
+ */
+int
+iwx_preinit(struct iwx_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = IC2IFP(ic);
+	int err;
+	static int attached;
+
+	err = iwx_prepare_card_hw(sc);
+	if (err) {
+		printf("%s: could not initialize hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	if (attached) {
+		/* Update MAC in case the upper layers changed it. */
+		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
+		    ((struct arpcom *)ifp)->ac_enaddr);
+		return 0;
+	}
+
+	err = iwx_start_hw(sc);
+	if (err) {
+		printf("%s: could not initialize hardware\n", DEVNAME(sc));
+		return err;
+	}
+
+	/* Run the init firmware once to read NVM data, then stop again. */
+	err = iwx_run_init_mvm_ucode(sc, 1);
+	iwx_stop_device(sc);
+	if (err)
+		return err;
+
+	/* Print version info and MAC address on first successful fw load. */
+	attached = 1;
+	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
+	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
+	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
+
+	if (sc->sc_nvm.sku_cap_11n_enable)
+		iwx_setup_ht_rates(sc);
+
+	/* not all hardware can do 5GHz band */
+	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
+		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
+		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
+
+	/* Configure channel information obtained from firmware. */
+	ieee80211_channel_init(ifp);
+
+	/* Configure MAC address. */
+	err = if_setlladdr(ifp, ic->ic_myaddr);
+	if (err)
+		printf("%s: could not set MAC address (error %d)\n",
+		    DEVNAME(sc), err);
+
+	/* Re-init media now that supported channels/rates are known. */
+	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
+
+	return 0;
+}
+
+/*
+ * Deferred attach hook, scheduled via config_mountroot() so that it
+ * runs once the root file system (and thus the firmware image) is
+ * available.
+ */
+void
+iwx_attach_hook(struct device *self)
+{
+	KASSERT(!cold);
+
+	iwx_preinit((struct iwx_softc *)self);
+}
+
+/*
+ * autoconf(9) attach: map PCI resources, establish the interrupt,
+ * identify the adapter, allocate DMA rings, and register the 802.11
+ * interface.  Firmware-dependent setup is deferred to iwx_attach_hook.
+ */
+void
+iwx_attach(struct device *parent, struct device *self, void *aux)
+{
+	struct iwx_softc *sc = (void *)self;
+	struct pci_attach_args *pa = aux;
+	pci_intr_handle_t ih;
+	pcireg_t reg, memtype;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ifnet *ifp = &ic->ic_if;
+	const char *intrstr;
+	int err;
+	int txq_i, i;
+
+	sc->sc_pct = pa->pa_pc;
+	sc->sc_pcitag = pa->pa_tag;
+	sc->sc_dmat = pa->pa_dmat;
+
+	rw_init(&sc->ioctl_rwl, "iwxioctl");
+
+	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
+	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
+	if (err == 0) {
+		printf("%s: PCIe capability structure not found!\n",
+		    DEVNAME(sc));
+		return;
+	}
+
+	/* Clear device-specific "PCI retry timeout" register (41h). */
+	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
+	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
+
+	/* Enable bus-mastering and hardware bug workaround. */
+	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
+	reg |= PCI_COMMAND_MASTER_ENABLE;
+	/* if !MSI */
+	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
+		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
+	}
+	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
+
+	/* Map device registers. */
+	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
+	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
+	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
+	if (err) {
+		printf("%s: can't map mem space\n", DEVNAME(sc));
+		return;
+	}
+
+	/* Prefer MSI-X; fall back to MSI, then legacy INTx. */
+	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
+		sc->sc_msix = 1;
+	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
+		printf("%s: can't map interrupt\n", DEVNAME(sc));
+		return;
+	}
+
+	intrstr = pci_intr_string(sc->sc_pct, ih);
+	if (sc->sc_msix)
+		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
+		    iwx_intr_msix, sc, DEVNAME(sc));
+	else
+		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
+		    iwx_intr, sc, DEVNAME(sc));
+
+	if (sc->sc_ih == NULL) {
+		printf("\n");
+		printf("%s: can't establish interrupt", DEVNAME(sc));
+		if (intrstr != NULL)
+			printf(" at %s", intrstr);
+		printf("\n");
+		return;
+	}
+	printf(", %s\n", intrstr);
+
+	iwx_disable_interrupts(sc);
+
+	/* Select firmware image and per-device parameters. */
+	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
+	switch (PCI_PRODUCT(pa->pa_id)) {
+	case PCI_PRODUCT_INTEL_WL_22500_1:
+		sc->sc_fwname = "iwx-cc-a0-46";
+		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
+		sc->sc_nvm_max_section_size = 32768;
+		sc->sc_integrated = 1;
+		sc->sc_tx_with_siso_diversity = 0;
+		break;
+	default:
+		printf("%s: unknown adapter type\n", DEVNAME(sc));
+		return;
+	}
+
+	/*
+	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
+	 * changed, and now the revision step also includes bit 0-1 (no more
+	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
+	 * in the old format.
+	 */
+	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
+	    (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
+
+	if (iwx_prepare_card_hw(sc) != 0) {
+		printf("%s: could not initialize hardware\n",
+		    DEVNAME(sc));
+		return;
+	}
+
+	/*
+	 * In order to recognize C step the driver should read the
+	 * chip version id located at the AUX bus MISC address.
+	 */
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+	DELAY(2);
+
+	err = iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+	    25000);
+	if (!err) {
+		printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
+		return;
+	}
+
+	if (iwx_nic_lock(sc)) {
+		uint32_t hw_step = iwx_read_prph(sc, IWX_WFPM_CTRL_REG);
+		hw_step |= IWX_ENABLE_WFPM;
+		iwx_write_prph(sc, IWX_WFPM_CTRL_REG, hw_step);
+		hw_step = iwx_read_prph(sc, IWX_AUX_MISC_REG);
+		hw_step = (hw_step >> IWX_HW_STEP_LOCATION_BITS) & 0xF;
+		if (hw_step == 0x3)
+			sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
+			    (IWX_SILICON_C_STEP << 2);
+		iwx_nic_unlock(sc);
+	} else {
+		printf("%s: Failed to lock the nic\n", DEVNAME(sc));
+		return;
+	}
+
+	/*
+	 * Allocate DMA memory for firmware transfers.
+	 * Must be aligned on a 16-byte boundary.
+	 */
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
+	    sc->sc_fwdmasegsz, 16);
+	if (err) {
+		printf("%s: could not allocate memory for firmware\n",
+		    DEVNAME(sc));
+		return;
+	}
+
+	/* Allocate interrupt cause table (ICT).*/
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
+	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
+	if (err) {
+		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
+		goto fail1;
+	}
+
+	/* TX scheduler rings must be aligned on a 1KB boundary. */
+	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
+	    nitems(sc->txq) * sizeof(struct iwx_agn_scd_bc_tbl), 1024);
+	if (err) {
+		printf("%s: could not allocate TX scheduler rings\n",
+		    DEVNAME(sc));
+		goto fail3;
+	}
+
+	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
+		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
+		if (err) {
+			printf("%s: could not allocate TX ring %d\n",
+			    DEVNAME(sc), txq_i);
+			goto fail4;
+		}
+	}
+
+	err = iwx_alloc_rx_ring(sc, &sc->rxq);
+	if (err) {
+		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
+		goto fail4;
+	}
+
+	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
+	if (sc->sc_nswq == NULL)
+		goto fail4;
+
+	/* Clear pending interrupts. */
+	IWX_WRITE(sc, IWX_CSR_INT, 0xffffffff);
+
+	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
+	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
+	ic->ic_state = IEEE80211_S_INIT;
+
+	/* Set device capabilities. */
+	ic->ic_caps =
+	    IEEE80211_C_WEP |		/* WEP */
+	    IEEE80211_C_RSN |		/* WPA/RSN */
+	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
+	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
+	    IEEE80211_C_MONITOR |	/* monitor mode supported */
+	    IEEE80211_C_SHSLOT |	/* short slot time supported */
+	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
+
+	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
+	ic->ic_htcaps |=
+	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
+	ic->ic_htxcaps = 0;
+	ic->ic_txbfcaps = 0;
+	ic->ic_aselcaps = 0;
+	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
+
+	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
+	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
+	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
+
+	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
+		sc->sc_phyctxt[i].id = i;
+	}
+
+	sc->sc_amrr.amrr_min_success_threshold =  1;
+	sc->sc_amrr.amrr_max_success_threshold = 15;
+
+	/* IBSS channel undefined for now. */
+	ic->ic_ibss_chan = &ic->ic_channels[1];
+
+	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
+
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = iwx_ioctl;
+	ifp->if_start = iwx_start;
+	ifp->if_watchdog = iwx_watchdog;
+	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
+
+	if_attach(ifp);
+	ieee80211_ifattach(ifp);
+	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
+
+#if NBPFILTER > 0
+	iwx_radiotap_attach(sc);
+#endif
+	timeout_set(&sc->sc_calib_to, iwx_calib_timeout, sc);
+	task_set(&sc->init_task, iwx_init_task, sc);
+	task_set(&sc->newstate_task, iwx_newstate_task, sc);
+	task_set(&sc->ba_task, iwx_ba_task, sc);
+	task_set(&sc->htprot_task, iwx_htprot_task, sc);
+
+	ic->ic_node_alloc = iwx_node_alloc;
+#ifdef notyet
+	/* TODO: background scans trigger firmware errors */
+	ic->ic_bgscan_start = iwx_bgscan;
+#endif
+
+	/* Override 802.11 state transition machine. */
+	sc->sc_newstate = ic->ic_newstate;
+	ic->ic_newstate = iwx_newstate;
+	ic->ic_update_htprot = iwx_update_htprot;
+	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
+	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
+#ifdef notyet
+	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
+	ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
+#endif
+	/*
+	 * We cannot read the MAC address without loading the
+	 * firmware from disk. Postpone until mountroot is done.
+	 */
+	config_mountroot(self, iwx_attach_hook);
+
+	return;
+
+	/* Unwind allocations in reverse order on failure. */
+fail4:	while (--txq_i >= 0)
+		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
+	iwx_free_rx_ring(sc, &sc->rxq);
+	iwx_dma_contig_free(&sc->sched_dma);
+fail3:	if (sc->ict_dma.vaddr != NULL)
+		iwx_dma_contig_free(&sc->ict_dma);
+
+fail1:	iwx_dma_contig_free(&sc->fw_dma);
+	return;
+}
+
+#if NBPFILTER > 0
+/*
+ * Attach the BPF radiotap tap and initialize the static fields of the
+ * RX and TX radiotap headers.
+ */
+void
+iwx_radiotap_attach(struct iwx_softc *sc)
+{
+	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
+	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
+
+	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
+	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
+	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
+
+	sc->sc_txtap_len = sizeof sc->sc_txtapu;
+	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
+	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
+}
+#endif
+
+/*
+ * Task to (re)initialize the device, e.g. after a firmware or hardware
+ * error.  Stops the interface if running and restarts it unless a
+ * fatal condition (HW error or rfkill) is flagged.
+ */
+void
+iwx_init_task(void *arg1)
+{
+	struct iwx_softc *sc = arg1;
+	struct ifnet *ifp = &sc->sc_ic.ic_if;
+	int s = splnet();
+	int generation = sc->sc_generation;
+	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
+
+	rw_enter_write(&sc->ioctl_rwl);
+	/* Bail out if the device was torn down while we waited. */
+	if (generation != sc->sc_generation) {
+		rw_exit(&sc->ioctl_rwl);
+		splx(s);
+		return;
+	}
+
+	if (ifp->if_flags & IFF_RUNNING)
+		iwx_stop(ifp);
+	else
+		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
+
+	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
+		iwx_init(ifp);
+
+	rw_exit(&sc->ioctl_rwl);
+	splx(s);
+}
+
+/*
+ * Re-initialize PCI and interrupt state after system resume.
+ * Returns the result of iwx_prepare_card_hw().
+ */
+int
+iwx_resume(struct iwx_softc *sc)
+{
+	pcireg_t reg;
+
+	/* Clear device-specific "PCI retry timeout" register (41h). */
+	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
+	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
+
+	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
+	iwx_conf_msix_hw(sc, 0);
+
+	iwx_enable_rfkill_int(sc);
+	iwx_check_rfkill(sc);
+
+	return iwx_prepare_card_hw(sc);
+}
+
+/*
+ * autoconf(9) activate hook for suspend/resume power transitions.
+ *
+ * NOTE(review): the error from iwx_resume() is only reported via
+ * printf; the function always returns 0 — confirm this is intended.
+ */
+int
+iwx_activate(struct device *self, int act)
+{
+	struct iwx_softc *sc = (struct iwx_softc *)self;
+	struct ifnet *ifp = &sc->sc_ic.ic_if;
+	int err = 0;
+
+	switch (act) {
+	case DVACT_QUIESCE:
+		if (ifp->if_flags & IFF_RUNNING) {
+			rw_enter_write(&sc->ioctl_rwl);
+			iwx_stop(ifp);
+			rw_exit(&sc->ioctl_rwl);
+		}
+		break;
+	case DVACT_RESUME:
+		err = iwx_resume(sc);
+		if (err)
+			printf("%s: could not initialize hardware\n",
+			    DEVNAME(sc));
+		break;
+	case DVACT_WAKEUP:
+		/* Hardware should be up at this point. */
+		if (iwx_set_hw_ready(sc))
+			task_add(systq, &sc->init_task);
+		break;
+	}
+
+	return 0;
+}
+
+/* autoconf(9) driver definition: "iwx" network interface devices. */
+struct cfdriver iwx_cd = {
+	NULL, "iwx", DV_IFNET
+};
+
+/* Attachment glue: match/attach entry points, no detach, activate hook. */
+struct cfattach iwx_ca = {
+	sizeof(struct iwx_softc), iwx_match, iwx_attach,
+	NULL, iwx_activate
+};
diff --git a/sys/dev/pci/if_iwxreg.h b/sys/dev/pci/if_iwxreg.h
new file mode 100644
index 00000000000..d27823b64d5
--- /dev/null
+++ b/sys/dev/pci/if_iwxreg.h
@@ -0,0 +1,6206 @@
+/* $OpenBSD: if_iwxreg.h,v 1.1 2020/02/15 08:47:14 stsp Exp $ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWX_MAX_DRAM_ENTRY 64
+#define IWX_CSR_CTXT_INFO_BA 0x40
+
+/**
+ * enum iwx_context_info_flags - Context information control flags
+ * @IWX_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ * the init done for driver command that configures several system modes
+ * @IWX_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWX_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWX_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * exponent, the actual size is 2**value, valid sizes are 8-2048.
+ * The value is four bits long. Maximum valid exponent is 12
+ * @IWX_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ * default is short format - not supported by the driver)
+ * @IWX_CTXT_INFO_RB_SIZE_POS: RB size position
+ * (values are IWX_CTXT_INFO_RB_SIZE_*K)
+ * @IWX_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
+ */
+enum iwx_context_info_flags {
+ IWX_CTXT_INFO_AUTO_FUNC_INIT = (1 << 0),
+ IWX_CTXT_INFO_EARLY_DEBUG = (1 << 1),
+ IWX_CTXT_INFO_ENABLE_CDMP = (1 << 2),
+ IWX_CTXT_INFO_RB_CB_SIZE_POS = 4,
+ IWX_CTXT_INFO_TFD_FORMAT_LONG = (1 << 8),
+ IWX_CTXT_INFO_RB_SIZE_POS = 9,
+ IWX_CTXT_INFO_RB_SIZE_1K = 0x1,
+ IWX_CTXT_INFO_RB_SIZE_2K = 0x2,
+ IWX_CTXT_INFO_RB_SIZE_4K = 0x4,
+ IWX_CTXT_INFO_RB_SIZE_8K = 0x8,
+ IWX_CTXT_INFO_RB_SIZE_12K = 0x9,
+ IWX_CTXT_INFO_RB_SIZE_16K = 0xa,
+ IWX_CTXT_INFO_RB_SIZE_20K = 0xb,
+ IWX_CTXT_INFO_RB_SIZE_24K = 0xc,
+ IWX_CTXT_INFO_RB_SIZE_28K = 0xd,
+ IWX_CTXT_INFO_RB_SIZE_32K = 0xe,
+};
+
+/*
+ * struct iwx_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwx_context_info_version {
+ uint16_t mac_id;
+ uint16_t version;
+ uint16_t size;
+ uint16_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_control - control structure
+ * @control_flags: context information flags see &enum iwx_context_info_flags
+ */
+struct iwx_context_info_control {
+ uint32_t control_flags;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwx_context_info_dram {
+ uint64_t umac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t lmac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t virtual_img[IWX_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwx_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwx_context_info_rbd_cfg {
+ uint64_t free_rbd_addr;
+ uint64_t used_rbd_addr;
+ uint64_t status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwx_context_info_hcmd_cfg - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwx_context_info_hcmd_cfg {
+ uint64_t cmd_queue_addr;
+ uint8_t cmd_queue_size;
+ uint8_t reserved[7];
+} __packed;
+
+/*
+ * struct iwx_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwx_context_info_dump_cfg {
+ uint64_t core_dump_addr;
+ uint32_t core_dump_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwx_context_info_pnvm_cfg {
+ uint64_t platform_nvm_addr;
+ uint32_t platform_nvm_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_early_dbg_cfg - early debug configuration for
+ * dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwx_context_info_early_dbg_cfg {
+ uint64_t early_debug_addr;
+ uint32_t early_debug_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwx_context_info {
+ struct iwx_context_info_version version;
+ struct iwx_context_info_control control;
+ uint64_t reserved0;
+ struct iwx_context_info_rbd_cfg rbd_cfg;
+ struct iwx_context_info_hcmd_cfg hcmd_cfg;
+ uint32_t reserved1[4];
+ struct iwx_context_info_dump_cfg dump_cfg;
+ struct iwx_context_info_early_dbg_cfg edbg_cfg;
+ struct iwx_context_info_pnvm_cfg pnvm_cfg;
+ uint32_t reserved2[16];
+ struct iwx_context_info_dram dram;
+ uint32_t reserved3[16];
+} __packed;
+
+#define IWX_MGMT_TID 15
+
+#define IWX_MQ_RX_TABLE_SIZE 512
+
+/* cb size is the exponent */
+#define IWX_RX_QUEUE_CB_SIZE(x) ((sizeof(x) <= 4) ? (fls(x) - 1) : (flsl(x) - 1))
+
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. IWX_CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via IWX_CSR_EEPROM and IWX_CSR_OTP registers
+ */
+#define IWX_CSR_HW_IF_CONFIG_REG (0x000) /* hardware interface config */
+#define IWX_CSR_INT_COALESCING (0x004) /* accum ints, 32-usec units */
+#define IWX_CSR_INT (0x008) /* host interrupt status/ack */
+#define IWX_CSR_INT_MASK (0x00c) /* host interrupt enable */
+#define IWX_CSR_FH_INT_STATUS (0x010) /* busmaster int status/ack*/
+#define IWX_CSR_GPIO_IN (0x018) /* read external chip pins */
+#define IWX_CSR_RESET (0x020) /* busmaster enable, NMI, etc*/
+#define IWX_CSR_GP_CNTRL (0x024)
+
+/* 2nd byte of IWX_CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define IWX_CSR_INT_PERIODIC_REG (0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-4: Type of device: see IWX_CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ */
+#define IWX_CSR_HW_REV (0x028)
+
+#define IWX_CSR_GIO_REG (0x03C)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define IWX_CSR_UCODE_DRV_GP1 (0x054)
+#define IWX_CSR_UCODE_DRV_GP1_SET (0x058)
+#define IWX_CSR_UCODE_DRV_GP1_CLR (0x05c)
+#define IWX_CSR_UCODE_DRV_GP2 (0x060)
+
+#define IWX_CSR_MBOX_SET_REG (0x088)
+#define IWX_CSR_MBOX_SET_REG_OS_ALIVE 0x20
+
+#define IWX_CSR_DRAM_INT_TBL_REG (0x0A0)
+#define IWX_CSR_MAC_SHADOW_REG_CTRL (0x0A8) /* 6000 and up */
+
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS (0x100)
+
+#define IWX_CSR_DBG_HPET_MEM_REG (0x240)
+#define IWX_CSR_DBG_LINK_PWR_MGMT_REG (0x250)
+
+/* Bits for IWX_CSR_HW_IF_CONFIG_REG */
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define IWX_CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
+#define IWX_CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+
+#define IWX_CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
+#define IWX_CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define IWX_CSR_INT_BIT_FH_RX (1U << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define IWX_CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define IWX_CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define IWX_CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define IWX_CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define IWX_CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define IWX_CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define IWX_CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define IWX_CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define IWX_CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define IWX_CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define IWX_CSR_INI_SET_MASK (IWX_CSR_INT_BIT_FH_RX | \
+ IWX_CSR_INT_BIT_HW_ERR | \
+ IWX_CSR_INT_BIT_FH_TX | \
+ IWX_CSR_INT_BIT_SW_ERR | \
+ IWX_CSR_INT_BIT_RF_KILL | \
+ IWX_CSR_INT_BIT_SW_RX | \
+ IWX_CSR_INT_BIT_WAKEUP | \
+ IWX_CSR_INT_BIT_ALIVE | \
+ IWX_CSR_INT_BIT_RX_PERIODIC)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define IWX_CSR_FH_INT_BIT_ERR (1U << 31) /* Error */
+#define IWX_CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define IWX_CSR_FH_INT_RX_MASK (IWX_CSR_FH_INT_BIT_HI_PRIOR | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL0)
+
+#define IWX_CSR_FH_INT_TX_MASK (IWX_CSR_FH_INT_BIT_TX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_TX_CHNL0)
+
+/* RESET */
+#define IWX_CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define IWX_CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define IWX_CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define IWX_CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define IWX_CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
+
+
+/* HW REV */
+#define IWX_CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define IWX_CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+
+#define IWX_CSR_HW_REV_TYPE_MSK (0x000FFF0)
+
+/* CSR GIO */
+#define IWX_CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: device saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define IWX_CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define IWX_CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* HPET MEM debug */
+#define IWX_CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define IWX_CSR_DRAM_INT_TBL_ENABLE (1U << 31)
+#define IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
+#define IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/* 22000 configuration registers */
+
+/*
+ * TFH Configuration register.
+ *
+ * BIT fields:
+ *
+ * Bits 3:0:
+ * Define the maximum number of pending read requests.
+ *	Maximum configuration value allowed is 0xC
+ * Bits 9:8:
+ * Define the maximum transfer size. (64 / 128 / 256)
+ * Bit 10:
+ * When bit is set and transfer size is set to 128B, the TFH will enable
+ * reading chunks of more than 64B only if the read address is aligned to 128B.
+ * In case of DRAM read address which is not aligned to 128B, the TFH will
+ * enable transfer size which doesn't cross 64B DRAM address boundary.
+*/
+#define IWX_TFH_TRANSFER_MODE (0x1F40)
+#define IWX_TFH_TRANSFER_MAX_PENDING_REQ 0xc
+#define IWX_TFH_CHUNK_SIZE_128 (1 << 8)
+#define IWX_TFH_CHUNK_SPLIT_MODE (1 << 10)
+
+/*
+ * Defines the offset address in dwords referring from the beginning of the
+ * Tx CMD which will be updated in DRAM.
+ * Note that the TFH offset address for Tx CMD update is always referring to
+ * the start of the TFD first TB.
+ * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
+ */
+#define IWX_TFH_TXCMD_UPDATE_CFG (0x1F48)
+
+/*
+ * Controls TX DMA operation
+ *
+ * BIT fields:
+ *
+ * Bits 31:30: Enable the SRAM DMA channel.
+ * Turning on bit 31 will kick the SRAM2DRAM DMA.
+ * Note that the sram2dram may be enabled only after configuring the DRAM and
+ * SRAM addresses registers and the byte count register.
+ * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
+ * set to 1 - interrupt is sent to the driver
+ * Bit 0: Indicates the snoop configuration
+*/
+#define IWX_TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
+#define IWX_TFH_SRV_DMA_SNOOP (1 << 0)
+#define IWX_TFH_SRV_DMA_TO_DRIVER (1 << 24)
+#define IWX_TFH_SRV_DMA_START (1U << 31)
+
+/* Defines the DMA SRAM write start address to transfer a data block */
+#define IWX_TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
+
+/* Defines the 64bits DRAM start address to read the DMA data block from */
+#define IWX_TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
+
+/*
+ * Defines the number of bytes to transfer from DRAM to SRAM.
+ * Note that this register may be configured with non-dword aligned size.
+ */
+#define IWX_TFH_SRV_DMA_CHNL0_BC (0x1F70)
+
+/* 9000 rx series registers */
+
+#define IWX_RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define IWX_RFH_Q_FRBDCB_BA_LSB(q) (IWX_RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_FRBDCB_WIDX 0xA08080
+#define IWX_RFH_Q_FRBDCB_WIDX(q) (IWX_RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define IWX_RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define IWX_RFH_Q_FRBDCB_WIDX_TRG(q) (IWX_RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
+/* Read index table */
+#define IWX_RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define IWX_RFH_Q_FRBDCB_RIDX(q) (IWX_RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define IWX_RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define IWX_RFH_Q_URBDCB_BA_LSB(q) (IWX_RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_URBDCB_WIDX 0xA08180
+#define IWX_RFH_Q_URBDCB_WIDX(q) (IWX_RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define IWX_RFH_Q0_URBDCB_VAID 0xA081C0
+#define IWX_RFH_Q_URBDCB_VAID(q) (IWX_RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define IWX_RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */
+#define IWX_RFH_Q_URBD_STTS_WPTR_LSB(q) (IWX_RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define IWX_RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define IWX_RFH_Q_ORB_WPTR_LSB(q) (IWX_RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define IWX_RFH_RBDBUF_RBD0_LSB 0xA08300
+#define IWX_RFH_RBDBUF_RBD_LSB(q) (IWX_RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/**
+ * RFH Status Register
+ *
+ * Bit fields:
+ *
+ * Bit 29: RBD_FETCH_IDLE
+ * This status flag is set by the RFH when there is no active RBD fetch from
+ * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending
+ * RBD read response from DRAM), this flag is immediately turned off.
+ *
+ * Bit 30: SRAM_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * SRAM to DRAM.
+ * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
+ *
+ * Bit 31: RXF_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * RXF to DRAM.
+ * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
+ */
+#define IWX_RFH_GEN_STATUS 0xA09808
+#define IWX_RFH_GEN_STATUS_GEN3 0xA07824
+#define IWX_RBD_FETCH_IDLE (1 << 29)
+#define IWX_SRAM_DMA_IDLE (1 << 30)
+#define IWX_RXF_DMA_IDLE (1U << 31)
+
+/* DMA configuration */
+#define IWX_RFH_RXF_DMA_CFG 0xA09820
+#define IWX_RFH_RXF_DMA_CFG_GEN3 0xA07880
+/* RB size */
+#define IWX_RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define IWX_RFH_RXF_DMA_RB_SIZE_POS 16
+#define IWX_RFH_RXF_DMA_RB_SIZE_1K (0x1 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_2K (0x2 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_4K (0x4 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_8K (0x8 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_12K (0x9 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_16K (0xA << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_20K (0xB << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_24K (0xC << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_28K (0xD << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_32K (0xE << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size:defines the table sizes in RBD units */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS 24
+#define IWX_RFH_RXF_DMA_MIN_RB_4_8 (3 << IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */
+#define IWX_RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define IWX_RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/
+#define IWX_RFH_DMA_EN_ENABLE_VAL (1U << 31)
+
+#define IWX_RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define IWX_RFH_GEN_CFG 0xA09800
+#define IWX_RFH_GEN_CFG_SERVICE_DMA_SNOOP (1 << 0)
+#define IWX_RFH_GEN_CFG_RFH_DMA_SNOOP (1 << 1)
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_128 0x00000010
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_64 0x00000000
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define IWX_RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00
+
+/* end of 9000 rx series registers */
+
+/*
+ * This register is written by driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define IWX_UREG_UCODE_LOAD_STATUS (0xa05c40)
+#define IWX_UREG_CPU_INIT_RUN (0xa05c44)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define IWX_HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define IWX_HBUS_TARG_MEM_RADDR (IWX_HBUS_BASE+0x00c)
+#define IWX_HBUS_TARG_MEM_WADDR (IWX_HBUS_BASE+0x010)
+#define IWX_HBUS_TARG_MEM_WDAT (IWX_HBUS_BASE+0x018)
+#define IWX_HBUS_TARG_MEM_RDAT (IWX_HBUS_BASE+0x01c)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define IWX_HBUS_TARG_PRPH_WADDR (IWX_HBUS_BASE+0x044)
+#define IWX_HBUS_TARG_PRPH_RADDR (IWX_HBUS_BASE+0x048)
+#define IWX_HBUS_TARG_PRPH_WDAT (IWX_HBUS_BASE+0x04c)
+#define IWX_HBUS_TARG_PRPH_RDAT (IWX_HBUS_BASE+0x050)
+
+/* enable the ID buf for read */
+#define IWX_WFPM_PS_CTL_CLR 0xa0300c
+#define IWX_WFMP_MAC_ADDR_0 0xa03080
+#define IWX_WFMP_MAC_ADDR_1 0xa03084
+#define IWX_LMPM_PMG_EN 0xa01cec
+#define IWX_RADIO_REG_SYS_MANUAL_DFT_0 0xad4078
+#define IWX_RFIC_REG_RD 0xad0470
+#define IWX_WFPM_CTRL_REG 0xa03030
+#define IWX_WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK 0x08000000
+#define IWX_ENABLE_WFPM 0x80000000
+
+#define IWX_AUX_MISC_REG 0xa200b0
+#define IWX_HW_STEP_LOCATION_BITS 24
+
+#define IWX_AUX_MISC_MASTER1_EN 0xa20818
+#define IWX_AUX_MISC_MASTER1_EN_SBE_MSK 0x1
+#define IWX_AUX_MISC_MASTER1_SMPHR_STATUS 0xa20800
+#define IWX_RSA_ENABLE 0xa24b08
+#define IWX_PREG_AUX_BUS_WPROT_0 0xa04cc0
+#define IWX_PREG_PRPH_WPROT_9000 0xa04ce0
+#define IWX_PREG_PRPH_WPROT_22000 0xa04d00
+#define IWX_SB_CFG_OVERRIDE_ADDR 0xa26c78
+#define IWX_SB_CFG_OVERRIDE_ENABLE 0x8000
+#define IWX_SB_CFG_BASE_OVERRIDE 0xa20000
+#define IWX_SB_MODIFY_CFG_FLAG 0xa03088
+#define IWX_SB_CPU_1_STATUS 0xa01e30
+#define IWX_SB_CPU_2_STATUS 0Xa01e34
+
+#define IWX_UREG_CHICK 0xa05c00
+#define IWX_UREG_CHICK_MSI_ENABLE (1 << 24)
+#define IWX_UREG_CHICK_MSIX_ENABLE (1 << 25)
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define IWX_HBUS_TARG_WRPTR (IWX_HBUS_BASE+0x060)
+
+/**********************************************************
+ * CSR values
+ **********************************************************/
+ /*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ */
+#define IWX_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWX_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWX_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWX_HOST_INT_OPER_MODE (1U << 31)
+
+/*****************************************************************************
+ * MSIX related registers *
+ *****************************************************************************/
+
+#define IWX_CSR_MSIX_BASE (0x2000)
+#define IWX_CSR_MSIX_FH_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x800)
+#define IWX_CSR_MSIX_FH_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x804)
+#define IWX_CSR_MSIX_HW_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x808)
+#define IWX_CSR_MSIX_HW_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x80C)
+#define IWX_CSR_MSIX_AUTOMASK_ST_AD (IWX_CSR_MSIX_BASE + 0x810)
+#define IWX_CSR_MSIX_RX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x880)
+#define IWX_CSR_MSIX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x890)
+#define IWX_CSR_MSIX_PENDING_PBA_AD (IWX_CSR_MSIX_BASE + 0x1000)
+#define IWX_CSR_MSIX_RX_IVAR(cause) (IWX_CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define IWX_CSR_MSIX_IVAR(cause) (IWX_CSR_MSIX_IVAR_AD_REG + (cause))
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+ IWX_MSIX_FH_INT_CAUSES_Q0 = (1 << 0),
+ IWX_MSIX_FH_INT_CAUSES_Q1 = (1 << 1),
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM = (1 << 16),
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM = (1 << 17),
+ IWX_MSIX_FH_INT_CAUSES_S2D = (1 << 19),
+ IWX_MSIX_FH_INT_CAUSES_FH_ERR = (1 << 21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+/*
+ * Causes for the HW register interrupts
+ *
+ * NOTE(review): REG_WAKEUP and REG_IPC intentionally share bit 1; this
+ * mirrors the Linux iwlwifi definitions — presumably the bit's meaning
+ * depends on device generation. Verify against iwlwifi before changing.
+ */
+enum msix_hw_int_causes {
+	IWX_MSIX_HW_INT_CAUSES_REG_ALIVE	= (1 << 0),
+	IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP	= (1 << 1),
+	IWX_MSIX_HW_INT_CAUSES_REG_IPC		= (1 << 1),
+	IWX_MSIX_HW_INT_CAUSES_REG_IML		= (1 << 2),
+	IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2	= (1 << 5),
+	IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL	= (1 << 6),
+	IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL	= (1 << 7),
+	IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC	= (1 << 8),
+	IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR	= (1 << 25),
+	IWX_MSIX_HW_INT_CAUSES_REG_SCD		= (1 << 26),
+	IWX_MSIX_HW_INT_CAUSES_REG_FH_TX	= (1 << 27),
+	IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR	= (1 << 29),
+	IWX_MSIX_HW_INT_CAUSES_REG_HAP		= (1 << 30),
+};
+
+/*
+ * Registers to map causes to vectors
+ */
+enum msix_ivar_for_cause {
+ IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM = 0x0,
+ IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM = 0x1,
+ IWX_MSIX_IVAR_CAUSE_S2D = 0x3,
+ IWX_MSIX_IVAR_CAUSE_FH_ERR = 0x5,
+ IWX_MSIX_IVAR_CAUSE_REG_ALIVE = 0x10,
+ IWX_MSIX_IVAR_CAUSE_REG_WAKEUP = 0x11,
+ IWX_MSIX_IVAR_CAUSE_REG_IML = 0x12,
+ IWX_MSIX_IVAR_CAUSE_REG_CT_KILL = 0x16,
+ IWX_MSIX_IVAR_CAUSE_REG_RF_KILL = 0x17,
+ IWX_MSIX_IVAR_CAUSE_REG_PERIODIC = 0x18,
+ IWX_MSIX_IVAR_CAUSE_REG_SW_ERR = 0x29,
+ IWX_MSIX_IVAR_CAUSE_REG_SCD = 0x2a,
+ IWX_MSIX_IVAR_CAUSE_REG_FH_TX = 0x2b,
+ IWX_MSIX_IVAR_CAUSE_REG_HW_ERR = 0x2d,
+ IWX_MSIX_IVAR_CAUSE_REG_HAP = 0x2e,
+};
+
+#define IWX_MSIX_AUTO_CLEAR_CAUSE (0 << 7)
+#define IWX_MSIX_NON_AUTO_CLEAR_CAUSE (1 << 7)
+
+/**
+ * uCode API flags
+ * @IWX_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWX_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWX_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWX_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWX_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWX_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWX_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWX_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWX_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWX_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ *
+ */
+#define IWX_UCODE_TLV_FLAGS_PAN (1 << 0)
+#define IWX_UCODE_TLV_FLAGS_NEWSCAN (1 << 1)
+#define IWX_UCODE_TLV_FLAGS_MFP (1 << 2)
+#define IWX_UCODE_TLV_FLAGS_P2P (1 << 3)
+#define IWX_UCODE_TLV_FLAGS_DW_BC_TABLE (1 << 4)
+#define IWX_UCODE_TLV_FLAGS_SHORT_BL (1 << 7)
+#define IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS (1 << 10)
+#define IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID (1 << 12)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL (1 << 15)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE (1 << 16)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS (1 << 21)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM (1 << 22)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM (1 << 23)
+#define IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT (1 << 24)
+#define IWX_UCODE_TLV_FLAGS_EBS_SUPPORT (1 << 25)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD (1 << 26)
+#define IWX_UCODE_TLV_FLAGS_BCAST_FILTERING (1 << 29)
+#define IWX_UCODE_TLV_FLAGS_GO_UAPSD (1 << 30)
+#define IWX_UCODE_TLV_FLAGS_LTE_COEX (1U << 31)
+
+#define IWX_UCODE_TLV_FLAG_BITS \
+ "\020\1PAN\2NEWSCAN\3MFP\4P2P\5DW_BC_TABLE\6NEWBT_COEX\7PM_CMD\10SHORT_BL\11RX_ENERGY\12TIME_EVENT_V2\13D3_6_IPV6\14BF_UPDATED\15NO_BASIC_SSID\17D3_CONTINUITY\20NEW_NSOFFL_S\21NEW_NSOFFL_L\22SCHED_SCAN\24STA_KEY_CMD\25DEVICE_PS_CMD\26P2P_PS\27P2P_PS_DCM\30P2P_PS_SCM\31UAPSD_SUPPORT\32EBS\33P2P_PS_UAPSD\36BCAST_FILTERING\37GO_UAPSD\40LTE_COEX"
+
+/**
+ * uCode TLV api
+ * @IWX_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ * @IWX_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWX_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
+ * @IWX_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWX_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWX_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ * (command version 3) that supports per-chain limits
+ * @IWX_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
+ * @IWX_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWX_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
+ * @IWX_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
+ * @IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
+ *
+ * @IWX_NUM_UCODE_TLV_API: number of bits used
+ */
+#define IWX_UCODE_TLV_API_FRAGMENTED_SCAN 8
+#define IWX_UCODE_TLV_API_WIFI_MCC_UPDATE 9
+#define IWX_UCODE_TLV_API_WIDE_CMD_HDR 14
+#define IWX_UCODE_TLV_API_LQ_SS_PARAMS 18
+#define IWX_UCODE_TLV_API_NEW_VERSION 20
+#define IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY 24
+#define IWX_UCODE_TLV_API_TX_POWER_CHAIN 27
+#define IWX_UCODE_TLV_API_SCAN_TSF_REPORT 28
+#define IWX_UCODE_TLV_API_TKIP_MIC_KEYS 29
+#define IWX_UCODE_TLV_API_STA_TYPE 30
+#define IWX_UCODE_TLV_API_NAN2_VER2 31
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL 32
+#define IWX_UCODE_TLV_API_NEW_RX_STATS 35
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2 42
+#define IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG 56
+#define IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER 58
+#define IWX_NUM_UCODE_TLV_API 128
+
+#define IWX_UCODE_TLV_API_BITS \
+ "\020\10FRAGMENTED_SCAN\11WIFI_MCC_UPDATE\16WIDE_CMD_HDR\22LQ_SS_PARAMS\30EXT_SCAN_PRIO\33TX_POWER_CHAIN\35TKIP_MIC_KEYS"
+
+/**
+ * uCode capabilities
+ * @IWX_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWX_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWX_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWX_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
+ * @IWX_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWX_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWX_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT: supports 2G coex Command
+ * @IWX_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement to
+ * IWX_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWX_UCODE_TLV_CAPA_NAN_SUPPORT: supports NAN
+ * @IWX_UCODE_TLV_CAPA_UMAC_UPLOAD: supports upload mode in umac (1=supported,
+ * 0=no support)
+ * @IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
+ * @IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ * antenna the beacon should be transmitted
+ * @IWX_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWX_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
+ * @IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWX_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWX_UCODE_TLV_CAPA_LMAC_UPLOAD: supports upload mode in lmac (1=supported,
+ * 0=no support)
+ *
+ * @IWX_NUM_UCODE_TLV_CAPA: number of bits used
+ */
+#define IWX_UCODE_TLV_CAPA_D0I3_SUPPORT 0
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT 1
+#define IWX_UCODE_TLV_CAPA_UMAC_SCAN 2
+#define IWX_UCODE_TLV_CAPA_BEAMFORMER 3
+#define IWX_UCODE_TLV_CAPA_TOF_SUPPORT 5
+#define IWX_UCODE_TLV_CAPA_TDLS_SUPPORT 6
+#define IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT 8
+#define IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT 9
+#define IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT 10
+#define IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT 11
+#define IWX_UCODE_TLV_CAPA_DQA_SUPPORT 12
+#define IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH 13
+#define IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG 17
+#define IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT 18
+#define IWX_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT 19
+#define IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT 20
+#define IWX_UCODE_TLV_CAPA_CSUM_SUPPORT 21
+#define IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS 22
+#define IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD 26
+#define IWX_UCODE_TLV_CAPA_BT_COEX_PLCR 28
+#define IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC 29
+#define IWX_UCODE_TLV_CAPA_BT_COEX_RRC 30
+#define IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT 31
+#define IWX_UCODE_TLV_CAPA_NAN_SUPPORT 34
+#define IWX_UCODE_TLV_CAPA_UMAC_UPLOAD 35
+#define IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT 39
+#define IWX_UCODE_TLV_CAPA_CDB_SUPPORT 40
+#define IWX_UCODE_TLV_CAPA_TLC_OFFLOAD 43
+#define IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA 44
+#define IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS 48
+#define IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE 64
+#define IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS 65
+#define IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT 67
+#define IWX_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT 68
+#define IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION 71
+#define IWX_UCODE_TLV_CAPA_BEACON_STORING 72
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V2 73
+#define IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW 74
+#define IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT 75
+#define IWX_UCODE_TLV_CAPA_CTDP_SUPPORT 76
+#define IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED 77
+#define IWX_UCODE_TLV_CAPA_LMAC_UPLOAD 79
+#define IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG 80
+#define IWX_UCODE_TLV_CAPA_LQM_SUPPORT 81
+#define IWX_UCODE_TLV_CAPA_LED_CMD_SUPPORT 88
+
+#define IWX_NUM_UCODE_TLV_CAPA 128
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWX_CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define IWX_PAGING_SEPARATOR_SECTION 0xAAAABBBB
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWX_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWX_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWX_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWX_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwx_tlv_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+#define IWX_FW_PHY_CFG_RADIO_TYPE_POS 0
+#define IWX_FW_PHY_CFG_RADIO_TYPE (0x3 << IWX_FW_PHY_CFG_RADIO_TYPE_POS)
+#define IWX_FW_PHY_CFG_RADIO_STEP_POS 2
+#define IWX_FW_PHY_CFG_RADIO_STEP (0x3 << IWX_FW_PHY_CFG_RADIO_STEP_POS)
+#define IWX_FW_PHY_CFG_RADIO_DASH_POS 4
+#define IWX_FW_PHY_CFG_RADIO_DASH (0x3 << IWX_FW_PHY_CFG_RADIO_DASH_POS)
+#define IWX_FW_PHY_CFG_TX_CHAIN_POS 16
+#define IWX_FW_PHY_CFG_TX_CHAIN (0xf << IWX_FW_PHY_CFG_TX_CHAIN_POS)
+#define IWX_FW_PHY_CFG_RX_CHAIN_POS 20
+#define IWX_FW_PHY_CFG_RX_CHAIN (0xf << IWX_FW_PHY_CFG_RX_CHAIN_POS)
+
+/**
+ * struct iwx_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwx_fw_cipher_scheme {
+ uint32_t cipher;
+ uint8_t flags;
+ uint8_t hdr_len;
+ uint8_t pn_len;
+ uint8_t pn_off;
+ uint8_t key_idx_off;
+ uint8_t key_idx_mask;
+ uint8_t key_idx_shift;
+ uint8_t mic_len;
+ uint8_t hw_cipher;
+} __packed;
+
+/**
+ * struct iwx_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwx_fw_cscheme_list {
+ uint8_t size;
+ struct iwx_fw_cipher_scheme cs[];
+} __packed;
+
+/* v1/v2 uCode file layout */
+struct iwx_ucode_header {
+ uint32_t ver; /* major/minor/API/serial */
+ union {
+ struct {
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v1;
+ struct {
+ uint32_t build; /* build number */
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v2;
+ } u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, that each specify
+ * some piece of data.
+ */
+
+#define IWX_UCODE_TLV_INVALID 0 /* unused */
+#define IWX_UCODE_TLV_INST 1
+#define IWX_UCODE_TLV_DATA 2
+#define IWX_UCODE_TLV_INIT 3
+#define IWX_UCODE_TLV_INIT_DATA 4
+#define IWX_UCODE_TLV_BOOT 5
+#define IWX_UCODE_TLV_PROBE_MAX_LEN 6 /* a uint32_t value */
+#define IWX_UCODE_TLV_PAN 7
+#define IWX_UCODE_TLV_RUNT_EVTLOG_PTR 8
+#define IWX_UCODE_TLV_RUNT_EVTLOG_SIZE 9
+#define IWX_UCODE_TLV_RUNT_ERRLOG_PTR 10
+#define IWX_UCODE_TLV_INIT_EVTLOG_PTR 11
+#define IWX_UCODE_TLV_INIT_EVTLOG_SIZE 12
+#define IWX_UCODE_TLV_INIT_ERRLOG_PTR 13
+#define IWX_UCODE_TLV_ENHANCE_SENS_TBL 14
+#define IWX_UCODE_TLV_PHY_CALIBRATION_SIZE 15
+#define IWX_UCODE_TLV_WOWLAN_INST 16
+#define IWX_UCODE_TLV_WOWLAN_DATA 17
+#define IWX_UCODE_TLV_FLAGS 18
+#define IWX_UCODE_TLV_SEC_RT 19
+#define IWX_UCODE_TLV_SEC_INIT 20
+#define IWX_UCODE_TLV_SEC_WOWLAN 21
+#define IWX_UCODE_TLV_DEF_CALIB 22
+#define IWX_UCODE_TLV_PHY_SKU 23
+#define IWX_UCODE_TLV_SECURE_SEC_RT 24
+#define IWX_UCODE_TLV_SECURE_SEC_INIT 25
+#define IWX_UCODE_TLV_SECURE_SEC_WOWLAN 26
+#define IWX_UCODE_TLV_NUM_OF_CPU 27
+#define IWX_UCODE_TLV_CSCHEME 28
+#define IWX_UCODE_TLV_API_CHANGES_SET 29
+#define IWX_UCODE_TLV_ENABLED_CAPABILITIES 30
+#define IWX_UCODE_TLV_N_SCAN_CHANNELS 31
+#define IWX_UCODE_TLV_PAGING 32
+#define IWX_UCODE_TLV_SEC_RT_USNIFFER 34
+#define IWX_UCODE_TLV_SDIO_ADMA_ADDR 35
+#define IWX_UCODE_TLV_FW_VERSION 36
+#define IWX_UCODE_TLV_FW_DBG_DEST 38
+#define IWX_UCODE_TLV_FW_DBG_CONF 39
+#define IWX_UCODE_TLV_FW_DBG_TRIGGER 40
+#define IWX_UCODE_TLV_CMD_VERSIONS 48
+#define IWX_UCODE_TLV_FW_GSCAN_CAPA 50
+#define IWX_UCODE_TLV_FW_MEM_SEG 51
+#define IWX_UCODE_TLV_IML 52
+#define IWX_UCODE_TLV_FW_FMAC_API_VERSION 53
+#define IWX_UCODE_TLV_UMAC_DEBUG_ADDRS 54
+#define IWX_UCODE_TLV_LMAC_DEBUG_ADDRS 55
+#define IWX_UCODE_TLV_FW_RECOVERY_INFO 57
+#define IWX_UCODE_TLV_FW_FMAC_RECOVERY_INFO 59
+#define IWX_UCODE_TLV_FW_FSEQ_VERSION 60
+
+#define IWX_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWX_UCODE_TLV_TYPE_DEBUG_INFO (IWX_UCODE_TLV_DEBUG_BASE + 0)
+#define IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION (IWX_UCODE_TLV_DEBUG_BASE + 1)
+#define IWX_UCODE_TLV_TYPE_HCMD (IWX_UCODE_TLV_DEBUG_BASE + 2)
+#define IWX_UCODE_TLV_TYPE_REGIONS (IWX_UCODE_TLV_DEBUG_BASE + 3)
+#define IWX_UCODE_TLV_TYPE_TRIGGERS (IWX_UCODE_TLV_DEBUG_BASE + 4)
+#define IWX_UCODE_TLV_DEBUG_MAX IWX_UCODE_TLV_TYPE_TRIGGERS
+
+
+struct iwx_ucode_tlv {
+ uint32_t type; /* see above */
+ uint32_t length; /* not including type/length fields */
+ uint8_t data[0];
+};
+
+struct iwx_ucode_api {
+ uint32_t api_index;
+ uint32_t api_flags;
+} __packed;
+
+struct iwx_ucode_capa {
+ uint32_t api_index;
+ uint32_t api_capa;
+} __packed;
+
+#define IWX_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwx_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by first four bytes being
+ * zero, as such is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ uint32_t zero;
+ uint32_t magic;
+ uint8_t human_readable[64];
+ uint32_t ver; /* major/minor/API/serial */
+ uint32_t build;
+ uint64_t ignore;
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ uint8_t data[0];
+};
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via IWX_HBUS_TARG_PRPH_* registers.
+ */
+#define IWX_PRPH_BASE (0x00000)
+#define IWX_PRPH_END (0xFFFFF)
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * struct iwx_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwx_rb_status {
+	uint16_t closed_rb_num;
+	uint16_t closed_fr_num;
+	uint16_t finished_rb_num;
+	uint16_t finished_fr_nam; /* sic -- presumably "finished_fr_num"; confirm users before renaming */
+	uint32_t unused;
+} __packed;
+
+
+#define IWX_TFD_QUEUE_SIZE_MAX (256)
+#define IWX_TFD_QUEUE_SIZE_MAX_GEN3 (65536)
+/* cb size is the exponent - 3 */
+#define IWX_TFD_QUEUE_CB_SIZE(x) (IWX_RX_QUEUE_CB_SIZE(x) - 3)
+#define IWX_TFD_QUEUE_SIZE_BC_DUP (64)
+#define IWX_TFD_QUEUE_BC_SIZE (IWX_TFD_QUEUE_SIZE_MAX + \
+ IWX_TFD_QUEUE_SIZE_BC_DUP)
+#define IWX_TFD_QUEUE_BC_SIZE_GEN3 (IWX_TFD_QUEUE_SIZE_MAX_GEN3 + \
+ IWX_TFD_QUEUE_SIZE_BC_DUP)
+#define IWX_TFH_NUM_TBS 25
+
+/**
+ * struct iwx_tfh_tb transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains dma address and length of transmission address
+ *
+ * @tb_len length of the tx buffer
+ * @addr 64 bits dma address
+ */
+struct iwx_tfh_tb {
+ uint16_t tb_len;
+ uint64_t addr;
+} __packed;
+
+/**
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs.
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ *
+ * Each TFD contains pointer/size information for up to 25 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+
+/**
+ * struct iwx_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ * 5 -15 reserved
+ * @ tbs[25] transmit frame buffer descriptors
+ * @ __pad padding
+ */
+struct iwx_tfh_tfd {
+ uint16_t num_tbs;
+ struct iwx_tfh_tb tbs[IWX_TFH_NUM_TBS];
+ uint32_t __pad;
+} __packed;
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwx_agn_scd_bc_tbl scheduler byte count table
+ * base physical address provided by IWX_SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwx_agn_scd_bc_tbl {
+ uint16_t tfd_offset[IWX_TFD_QUEUE_BC_SIZE];
+} __packed;
+
+/**
+ * struct iwx_gen3_bc_tbl scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
+ */
+struct iwx_gen3_bc_tbl {
+ uint16_t tfd_offset[IWX_TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
+/* Maximum number of Tx queues. */
+#define IWX_MAX_QUEUES 31
+
+/**
+ * DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * to allow dynamic allocation of queues on-demand, rather than allocate them
+ * statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ * TXQ #2 - P2P device frames
+ * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ * TXQ #4 - BSS DATA frames queue
+ * TXQ #5-8 - non-QoS data, QoS no-data, and MGMT frames queue pool
+ * TXQ #9 - P2P GO/SoftAP probe responses
+ * TXQ #10-31 - QoS DATA frames queue pool (for Tx aggregation)
+ */
+
+/* static DQA Tx queue numbers */
+#define IWX_DQA_CMD_QUEUE 0
+#define IWX_DQA_AUX_QUEUE 1
+#define IWX_DQA_P2P_DEVICE_QUEUE 2
+#define IWX_DQA_INJECT_MONITOR_QUEUE 2
+#define IWX_DQA_GCAST_QUEUE 3
+#define IWX_DQA_BSS_CLIENT_QUEUE 4
+#define IWX_DQA_MIN_MGMT_QUEUE 5
+#define IWX_DQA_MAX_MGMT_QUEUE 8
+#define IWX_DQA_AP_PROBE_RESP_QUEUE 9
+#define IWX_DQA_MIN_DATA_QUEUE 10
+#define IWX_DQA_MAX_DATA_QUEUE 31
+
+#define IWX_TX_FIFO_BK 0
+#define IWX_TX_FIFO_BE 1
+#define IWX_TX_FIFO_VI 2
+#define IWX_TX_FIFO_VO 3
+#define IWX_TX_FIFO_MCAST 5
+#define IWX_TX_FIFO_CMD 7
+
+enum iwx_gen2_tx_fifo {
+ IWX_GEN2_TX_FIFO_CMD = 0,
+ IWX_GEN2_EDCA_TX_FIFO_BK,
+ IWX_GEN2_EDCA_TX_FIFO_BE,
+ IWX_GEN2_EDCA_TX_FIFO_VI,
+ IWX_GEN2_EDCA_TX_FIFO_VO,
+ IWX_GEN2_TRIG_TX_FIFO_BK,
+ IWX_GEN2_TRIG_TX_FIFO_BE,
+ IWX_GEN2_TRIG_TX_FIFO_VI,
+ IWX_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+#define IWX_TX_QUEUE_CFG_ENABLE_QUEUE (1 << 0)
+#define IWX_TX_QUEUE_CFG_TFD_SHORT_FORMAT (1 << 1)
+
+#define IWX_DEFAULT_QUEUE_SIZE IWX_TFD_QUEUE_SIZE_MAX
+#define IWX_CMD_QUEUE_SIZE 32
+
+/**
+ * struct iwx_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwx_tx_queue_cfg_cmd {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint16_t flags;
+ uint32_t cb_size;
+ uint64_t byte_cnt_addr;
+ uint64_t tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA -TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwx_tx_queue_cfg_rsp {
+ uint16_t queue_number;
+ uint16_t flags;
+ uint16_t write_pointer;
+ uint16_t reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+
+/*
+ * Commands
+ */
+#define IWX_ALIVE 0x1
+#define IWX_REPLY_ERROR 0x2
+#define IWX_INIT_COMPLETE_NOTIF 0x4
+
+/* PHY context commands */
+#define IWX_PHY_CONTEXT_CMD 0x8
+#define IWX_DBG_CFG 0x9
+
+/* UMAC scan commands */
+#define IWX_SCAN_ITERATION_COMPLETE_UMAC 0xb5
+#define IWX_SCAN_CFG_CMD 0xc
+#define IWX_SCAN_REQ_UMAC 0xd
+#define IWX_SCAN_ABORT_UMAC 0xe
+#define IWX_SCAN_COMPLETE_UMAC 0xf
+
+/* station table */
+#define IWX_ADD_STA_KEY 0x17
+#define IWX_ADD_STA 0x18
+#define IWX_REMOVE_STA 0x19
+
+/* TX */
+#define IWX_TX_CMD 0x1c
+#define IWX_TXPATH_FLUSH 0x1e
+#define IWX_MGMT_MCAST_KEY 0x1f
+
+/* scheduler config */
+#define IWX_SCD_QUEUE_CFG 0x1d
+
+/* global key */
+#define IWX_WEP_KEY 0x20
+
+/* MAC and Binding commands */
+#define IWX_MAC_CONTEXT_CMD 0x28
+#define IWX_TIME_EVENT_CMD 0x29 /* both CMD and response */
+#define IWX_TIME_EVENT_NOTIFICATION 0x2a
+#define IWX_BINDING_CONTEXT_CMD 0x2b
+#define IWX_TIME_QUOTA_CMD 0x2c
+#define IWX_NON_QOS_TX_COUNTER_CMD 0x2d
+
+/* Calibration */
+#define IWX_TEMPERATURE_NOTIFICATION 0x62
+#define IWX_CALIBRATION_CFG_CMD 0x65
+#define IWX_CALIBRATION_RES_NOTIFICATION 0x66
+#define IWX_CALIBRATION_COMPLETE_NOTIFICATION 0x67
+#define IWX_RADIO_VERSION_NOTIFICATION 0x68
+
+/* Phy */
+#define IWX_PHY_CONFIGURATION_CMD 0x6a
+
+/* Power - legacy power table command */
+#define IWX_POWER_TABLE_CMD 0x77
+#define IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION 0x78
+#define IWX_LTR_CONFIG 0xee
+
+/* NVM */
+#define IWX_NVM_ACCESS_CMD 0x88
+
+#define IWX_SET_CALIB_DEFAULT_CMD 0x8e
+
+#define IWX_BEACON_NOTIFICATION 0x90
+#define IWX_BEACON_TEMPLATE_CMD 0x91
+#define IWX_TX_ANT_CONFIGURATION_CMD 0x98
+#define IWX_BT_CONFIG 0x9b
+#define IWX_STATISTICS_CMD 0x9c
+#define IWX_STATISTICS_NOTIFICATION 0x9d
+#define IWX_REDUCE_TX_POWER_CMD 0x9f
+
+/* RF-KILL commands and notifications */
+#define IWX_CARD_STATE_CMD 0xa0
+#define IWX_CARD_STATE_NOTIFICATION 0xa1
+
+#define IWX_MISSED_BEACONS_NOTIFICATION 0xa2
+
+#define IWX_MFUART_LOAD_NOTIFICATION 0xb1
+
+/* Power - new power table command */
+#define IWX_MAC_PM_POWER_TABLE 0xa9
+
+#define IWX_REPLY_RX_PHY_CMD 0xc0
+#define IWX_REPLY_RX_MPDU_CMD 0xc1
+#define IWX_BA_NOTIF 0xc5
+
+/* Location Aware Regulatory */
+#define IWX_MCC_UPDATE_CMD 0xc8
+
+/* BT Coex */
+#define IWX_BT_COEX_PRIO_TABLE 0xcc
+#define IWX_BT_COEX_PROT_ENV 0xcd
+#define IWX_BT_PROFILE_NOTIFICATION 0xce
+#define IWX_BT_COEX_CI 0x5d
+
+#define IWX_REPLY_SF_CFG_CMD 0xd1
+#define IWX_REPLY_BEACON_FILTERING_CMD 0xd2
+
+/* DTS measurements */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER 0xdc
+#define IWX_DTS_MEASUREMENT_NOTIFICATION 0xdd
+
+#define IWX_REPLY_DEBUG_CMD 0xf0
+#define IWX_DEBUG_LOG_MSG 0xf7
+
+#define IWX_MCAST_FILTER_CMD 0xd0
+
+/* D3 commands/notifications */
+#define IWX_D3_CONFIG_CMD 0xd3
+#define IWX_PROT_OFFLOAD_CONFIG_CMD 0xd4
+#define IWX_OFFLOADS_QUERY_CMD 0xd5
+#define IWX_REMOTE_WAKE_CONFIG_CMD 0xd6
+
+/* for WoWLAN in particular */
+#define IWX_WOWLAN_PATTERNS 0xe0
+#define IWX_WOWLAN_CONFIGURATION 0xe1
+#define IWX_WOWLAN_TSC_RSC_PARAM 0xe2
+#define IWX_WOWLAN_TKIP_PARAM 0xe3
+#define IWX_WOWLAN_KEK_KCK_MATERIAL 0xe4
+#define IWX_WOWLAN_GET_STATUSES 0xe5
+#define IWX_WOWLAN_TX_POWER_PER_DB 0xe6
+
+/* and for NetDetect */
+#define IWX_NET_DETECT_CONFIG_CMD 0x54
+#define IWX_NET_DETECT_PROFILES_QUERY_CMD 0x56
+#define IWX_NET_DETECT_PROFILES_CMD 0x57
+#define IWX_NET_DETECT_HOTSPOTS_CMD 0x58
+#define IWX_NET_DETECT_HOTSPOTS_QUERY_CMD 0x59
+
+/* system group command IDs */
+#define IWX_FSEQ_VER_MISMATCH_NOTIFICATION 0xff
+
+#define IWX_REPLY_MAX 0xff
+
+/* PHY_OPS subcommand IDs */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE 0x0
+#define IWX_CTDP_CONFIG_CMD 0x03
+#define IWX_TEMP_REPORTING_THRESHOLDS_CMD 0x04
+#define IWX_CT_KILL_NOTIFICATION 0xFE
+#define IWX_DTS_MEASUREMENT_NOTIF_WIDE 0xFF
+
+/* command groups */
+#define IWX_LEGACY_GROUP 0x0
+#define IWX_LONG_GROUP 0x1
+#define IWX_SYSTEM_GROUP 0x2
+#define IWX_MAC_CONF_GROUP 0x3
+#define IWX_PHY_OPS_GROUP 0x4
+#define IWX_DATA_PATH_GROUP 0x5
+#define IWX_PROT_OFFLOAD_GROUP 0xb
+#define IWX_REGULATORY_AND_NVM_GROUP 0xc
+
+/* SYSTEM_GROUP group subcommand IDs */
+
+#define IWX_SHARED_MEM_CFG_CMD 0x00
+#define IWX_SOC_CONFIGURATION_CMD 0x01
+#define IWX_INIT_EXTENDED_CFG_CMD 0x03
+#define IWX_FW_ERROR_RECOVERY_CMD 0x07
+
+/* DATA_PATH group subcommand IDs */
+#define IWX_DQA_ENABLE_CMD 0x00
+
+/* REGULATORY_AND_NVM group subcommand IDs */
+#define IWX_NVM_ACCESS_COMPLETE 0x00
+#define IWX_NVM_GET_INFO 0x02
+
+/*
+ * struct iwx_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwx_dqa_enable_cmd {
+ uint32_t cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwx_cmd_response {
+ uint32_t status;
+};
+
+/*
+ * struct iwx_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwx_tx_ant_cfg_cmd {
+ uint32_t valid;
+} __packed;
+
/*
 * Calibration control struct.
 * Sent as part of the phy configuration command.
 * @flow_trigger: bitmap for which calibrations to perform according to
 *   flow triggers (IWX_CALIB_CFG_* bits below).
 * @event_trigger: bitmap for which calibrations to perform according to
 *   event triggers (IWX_CALIB_CFG_* bits below).
 */
struct iwx_calib_ctrl {
	uint32_t flow_trigger;
	uint32_t event_trigger;
} __packed;

/* This defines the bitmap of various calibrations to enable in both
 * init ucode and runtime ucode through IWX_CALIBRATION_CFG_CMD.
 */
#define IWX_CALIB_CFG_XTAL_IDX			(1 << 0)
#define IWX_CALIB_CFG_TEMPERATURE_IDX		(1 << 1)
#define IWX_CALIB_CFG_VOLTAGE_READ_IDX		(1 << 2)
#define IWX_CALIB_CFG_PAPD_IDX			(1 << 3)
#define IWX_CALIB_CFG_TX_PWR_IDX		(1 << 4)
#define IWX_CALIB_CFG_DC_IDX			(1 << 5)
#define IWX_CALIB_CFG_BB_FILTER_IDX		(1 << 6)
#define IWX_CALIB_CFG_LO_LEAKAGE_IDX		(1 << 7)
#define IWX_CALIB_CFG_TX_IQ_IDX			(1 << 8)
#define IWX_CALIB_CFG_TX_IQ_SKEW_IDX		(1 << 9)
#define IWX_CALIB_CFG_RX_IQ_IDX			(1 << 10)
#define IWX_CALIB_CFG_RX_IQ_SKEW_IDX		(1 << 11)
#define IWX_CALIB_CFG_SENSITIVITY_IDX		(1 << 12)
#define IWX_CALIB_CFG_CHAIN_NOISE_IDX		(1 << 13)
#define IWX_CALIB_CFG_DISCONNECTED_ANT_IDX	(1 << 14)
#define IWX_CALIB_CFG_ANT_COUPLING_IDX		(1 << 15)
#define IWX_CALIB_CFG_DAC_IDX			(1 << 16)
#define IWX_CALIB_CFG_ABS_IDX			(1 << 17)
#define IWX_CALIB_CFG_AGC_IDX			(1 << 18)

/*
 * Phy configuration command.
 * @phy_cfg: radio type/step/dash and Tx/Rx chain bits (IWX_PHY_CFG_* below)
 * @calib_control: which calibrations to run (see struct iwx_calib_ctrl)
 */
struct iwx_phy_cfg_cmd {
	uint32_t phy_cfg;
	struct iwx_calib_ctrl calib_control;
} __packed;

#define IWX_PHY_CFG_RADIO_TYPE		((1 << 0) | (1 << 1))
#define IWX_PHY_CFG_RADIO_STEP		((1 << 2) | (1 << 3))
#define IWX_PHY_CFG_RADIO_DASH		((1 << 4) | (1 << 5))
#define IWX_PHY_CFG_PRODUCT_NUMBER	((1 << 6) | (1 << 7))
#define IWX_PHY_CFG_TX_CHAIN_A		(1 << 8)
#define IWX_PHY_CFG_TX_CHAIN_B		(1 << 9)
#define IWX_PHY_CFG_TX_CHAIN_C		(1 << 10)
/* Note: bit 11 is unused; Rx chain bits start at bit 12. */
#define IWX_PHY_CFG_RX_CHAIN_A		(1 << 12)
#define IWX_PHY_CFG_RX_CHAIN_B		(1 << 13)
#define IWX_PHY_CFG_RX_CHAIN_C		(1 << 14)
+
#define IWX_NVM_VERSION 0

/*
 * 8k family NVM HW-Section offset (in words) definitions.
 * NOTE(review): the *_8000 constants appear inherited from iwm(4); confirm
 * which of them the AX200 NVM parsing code actually uses.
 */
#define IWX_HW_ADDR0_WFPM_8000		0x12
#define IWX_HW_ADDR1_WFPM_8000		0x16
#define IWX_HW_ADDR0_PCIE_8000		0x8A
#define IWX_HW_ADDR1_PCIE_8000		0x8E
#define IWX_MAC_ADDRESS_OVERRIDE_8000	1

/* 8k family NVM SW-Section offset (in words) definitions */
#define IWX_NVM_SW_SECTION_8000	0x1C0
#define IWX_NVM_VERSION_8000	0
#define IWX_RADIO_CFG_8000	0
#define IWX_SKU_8000		2
#define IWX_N_HW_ADDRS_8000	3

/* 8k family NVM REGULATORY-Section offset (in words) definitions */
#define IWX_NVM_CHANNELS_8000		0
#define IWX_NVM_LAR_OFFSET_8000_OLD	0x4C7
#define IWX_NVM_LAR_OFFSET_8000		0x507
#define IWX_NVM_LAR_ENABLED_8000	0x7

/* 8k family NVM calibration section offset (in words) definitions */
#define IWX_NVM_CALIB_SECTION_8000	0x2B8
#define IWX_XTAL_CALIB_8000		(0x316 - IWX_NVM_CALIB_SECTION_8000)

/* SKU Capabilities (actual values from NVM definition) */
#define IWX_NVM_SKU_CAP_BAND_24GHZ	(1 << 0)
#define IWX_NVM_SKU_CAP_BAND_52GHZ	(1 << 1)
#define IWX_NVM_SKU_CAP_11N_ENABLE	(1 << 2)
#define IWX_NVM_SKU_CAP_11AC_ENABLE	(1 << 3)
#define IWX_NVM_SKU_CAP_MIMO_DISABLE	(1 << 5)

/*
 * radio config bits (actual values from NVM definition)
 * Macro arguments are parenthesized so that expression arguments
 * (e.g. "a | b") expand correctly.
 */
#define IWX_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)         /* bits 0-1 */
#define IWX_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3) /* bits 2-3 */
#define IWX_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3) /* bits 4-5 */
#define IWX_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3) /* bits 6-7 */
#define IWX_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF) /* bits 8-11 */
#define IWX_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF) /* bits 12-15 */

/* 8k-family layout of the same radio config word. */
#define IWX_NVM_RF_CFG_PNUM_MSK_8000(x)		((x) & 0xF)
#define IWX_NVM_RF_CFG_DASH_MSK_8000(x)		(((x) >> 4) & 0xF)
#define IWX_NVM_RF_CFG_STEP_MSK_8000(x)		(((x) >> 8) & 0xF)
#define IWX_NVM_RF_CFG_TYPE_MSK_8000(x)		(((x) >> 12) & 0xFFF)
#define IWX_NVM_RF_CFG_TX_ANT_MSK_8000(x)	(((x) >> 24) & 0xF)
#define IWX_NVM_RF_CFG_RX_ANT_MSK_8000(x)	(((x) >> 28) & 0xF)

/*
 * channel flags in NVM
 * @IWX_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWX_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWX_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWX_NVM_CHANNEL_RADAR: radar detection required
 * @IWX_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWX_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWX_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWX_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWX_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 */
#define IWX_NVM_CHANNEL_VALID	(1 << 0)
#define IWX_NVM_CHANNEL_IBSS	(1 << 1)
#define IWX_NVM_CHANNEL_ACTIVE	(1 << 3)
#define IWX_NVM_CHANNEL_RADAR	(1 << 4)
#define IWX_NVM_CHANNEL_DFS	(1 << 7)
#define IWX_NVM_CHANNEL_WIDE	(1 << 8)
#define IWX_NVM_CHANNEL_40MHZ	(1 << 9)
#define IWX_NVM_CHANNEL_80MHZ	(1 << 10)
#define IWX_NVM_CHANNEL_160MHZ	(1 << 11)

/* Target of the IWX_NVM_ACCESS_CMD */
#define IWX_NVM_ACCESS_TARGET_CACHE	0
#define IWX_NVM_ACCESS_TARGET_OTP	1
#define IWX_NVM_ACCESS_TARGET_EEPROM	2

/* Section types for IWX_NVM_ACCESS_CMD */
#define IWX_NVM_SECTION_TYPE_SW			1
#define IWX_NVM_SECTION_TYPE_PAPD		2
#define IWX_NVM_SECTION_TYPE_REGULATORY		3
#define IWX_NVM_SECTION_TYPE_CALIBRATION	4
#define IWX_NVM_SECTION_TYPE_PRODUCTION		5
#define IWX_NVM_SECTION_TYPE_POST_FCS_CALIB	6
/* 7 unknown */
#define IWX_NVM_SECTION_TYPE_REGULATORY_SDP	8
/* 9 unknown */
#define IWX_NVM_SECTION_TYPE_HW_8000		10
#define IWX_NVM_SECTION_TYPE_MAC_OVERRIDE	11
#define IWX_NVM_SECTION_TYPE_PHY_SKU		12
#define IWX_NVM_NUM_OF_SECTIONS			13
+
/**
 * enum iwx_nvm_type - nvm formats
 * @IWX_NVM: the regular format
 * @IWX_NVM_EXT: extended NVM format
 */
enum iwx_nvm_type {
	IWX_NVM,
	IWX_NVM_EXT,
};

/**
 * struct iwx_nvm_access_cmd - Request the device to send an NVM section
 * @op_code: 0 - read, 1 - write
 * @target: IWX_NVM_ACCESS_TARGET_*
 * @type: IWX_NVM_SECTION_TYPE_*
 * @offset: offset in bytes into the section
 * @length: in bytes, to read/write
 * @data: if write operation, the data to write; on read it is empty
 */
struct iwx_nvm_access_cmd {
	uint8_t op_code;
	uint8_t target;
	uint16_t type;
	uint16_t offset;
	uint16_t length;
	uint8_t data[];
} __packed; /* IWX_NVM_ACCESS_CMD_API_S_VER_2 */

/**
 * struct iwx_nvm_access_complete_cmd - NVM_ACCESS commands are completed
 * @reserved: reserved
 */
struct iwx_nvm_access_complete_cmd {
	uint32_t reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */

/**
 * struct iwx_nvm_access_resp - response to IWX_NVM_ACCESS_CMD
 * @offset: offset in bytes into the section
 * @length: in bytes, either how much was written or read
 * @type: IWX_NVM_SECTION_TYPE_*
 * @status: 0 for success, fail otherwise
 * @data: if read operation, the data returned. Empty on write.
 */
struct iwx_nvm_access_resp {
	uint16_t offset;
	uint16_t length;
	uint16_t type;
	uint16_t status;
	uint8_t data[];
} __packed; /* IWX_NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
#define IWX_ALIVE_STATUS_ERR 0xDEAD
#define IWX_ALIVE_STATUS_OK 0xCAFE

/* SRAM addresses of LMAC logs, reported in the firmware alive notification. */
struct iwx_lmac_debug_addrs {
	uint32_t error_event_table_ptr;	/* SRAM address for error log */
	uint32_t log_event_table_ptr;	/* SRAM address for LMAC event log */
	uint32_t cpu_register_ptr;
	uint32_t dbgm_config_ptr;
	uint32_t alive_counter_ptr;
	uint32_t scd_base_ptr;		/* SRAM address for SCD */
	uint32_t st_fwrd_addr;		/* pointer to Store and forward */
	uint32_t st_fwrd_size;
} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */

/* Per-LMAC ucode version information plus debug pointers. */
struct iwx_lmac_alive {
	uint32_t ucode_major;
	uint32_t ucode_minor;
	uint8_t ver_subtype;
	uint8_t ver_type;
	uint8_t mac;
	uint8_t opt;
	uint32_t timestamp;
	struct iwx_lmac_debug_addrs dbg_ptrs;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */

/* SRAM addresses of UMAC logs. */
struct iwx_umac_debug_addrs {
	uint32_t error_info_addr;	/* SRAM address for UMAC error log */
	uint32_t dbg_print_buff_addr;
} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */

/* UMAC ucode version information plus debug pointers. */
struct iwx_umac_alive {
	uint32_t umac_major;	/* UMAC version: major */
	uint32_t umac_minor;	/* UMAC version: minor */
	struct iwx_umac_debug_addrs dbg_ptrs;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */

/*
 * Alive notification payload, version 4.
 * @status is expected to be IWX_ALIVE_STATUS_OK on success.
 */
struct iwx_alive_resp_v4 {
	uint16_t status;
	uint16_t flags;
	struct iwx_lmac_alive lmac_data[2];
	struct iwx_umac_alive umac_data;
} __packed; /* ALIVE_RES_API_S_VER_4 */
+
/**
 * commands driver may send before finishing init flow
 * @IWX_INIT_DEBUG_CFG: driver is going to send debug config command
 * @IWX_INIT_NVM: driver is going to send NVM_ACCESS commands
 */
#define IWX_INIT_DEBUG_CFG	(1 << 0)
#define IWX_INIT_NVM		(1 << 1)

/**
 * struct iwx_init_extended_cfg_cmd - mark what commands ucode should wait for
 * before finishing init flows
 * @init_flags: IWX_INIT_* flag bits
 */
struct iwx_init_extended_cfg_cmd {
	uint32_t init_flags;
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
/* Error response/notification */
#define IWX_FW_ERR_UNKNOWN_CMD		0x0
#define IWX_FW_ERR_INVALID_CMD_PARAM	0x1
#define IWX_FW_ERR_SERVICE		0x2
#define IWX_FW_ERR_ARC_MEMORY		0x3
#define IWX_FW_ERR_ARC_CODE		0x4
#define IWX_FW_ERR_WATCH_DOG		0x5
#define IWX_FW_ERR_WEP_GRP_KEY_INDX	0x10
#define IWX_FW_ERR_WEP_KEY_SIZE		0x11
#define IWX_FW_ERR_OBSOLETE_FUNC	0x12
#define IWX_FW_ERR_UNEXPECTED		0xFE
#define IWX_FW_ERR_FATAL		0xFF

/**
 * struct iwx_error_resp - FW error indication
 * ( IWX_REPLY_ERROR = 0x2 )
 * @error_type: one of IWX_FW_ERR_*
 * @cmd_id: the command ID for which the error occurred
 * @bad_cmd_seq_num: sequence number of the erroneous command
 * @error_service: which service created the error, applicable only if
 *	error_type = IWX_FW_ERR_SERVICE (0x2), otherwise 0
 * @timestamp: TSF in usecs.
 */
struct iwx_error_resp {
	uint32_t error_type;
	uint8_t cmd_id;
	uint8_t reserved1;
	uint16_t bad_cmd_seq_num;
	uint32_t error_service;
	uint64_t timestamp;
} __packed;
+
/*
 * Operations that a debug-config TLV may request on a register.
 * NOTE(review): these enumerators lack the IWX_ prefix used everywhere
 * else in this file; renaming would require touching all users.
 */
enum iwx_fw_dbg_reg_operator {
	CSR_ASSIGN,
	CSR_SETBIT,
	CSR_CLEARBIT,

	PRPH_ASSIGN,
	PRPH_SETBIT,
	PRPH_CLEARBIT,

	INDIRECT_ASSIGN,
	INDIRECT_SETBIT,
	INDIRECT_CLEARBIT,

	PRPH_BLOCKBIT,
};

/**
 * struct iwx_fw_dbg_reg_op - an operation on a register
 *
 * @op: &enum iwx_fw_dbg_reg_operator
 * @addr: offset of the register
 * @val: value
 */
struct iwx_fw_dbg_reg_op {
	uint8_t op;
	uint8_t reserved[3];
	uint32_t addr;
	uint32_t val;
} __packed;
+
/**
 * enum iwx_fw_dbg_monitor_mode - available monitor recording modes
 *
 * @SMEM_MODE: monitor stores the data in SMEM
 * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
 * @MARBH_MODE: monitor stores the data in MARBH buffer
 * @MIPI_MODE: monitor outputs the data through the MIPI interface
 *
 * NOTE(review): enumerators lack the IWX_ prefix used elsewhere in this file.
 */
enum iwx_fw_dbg_monitor_mode {
	SMEM_MODE = 0,
	EXTERNAL_MODE = 1,
	MARBH_MODE = 2,
	MIPI_MODE = 3,
};

/**
 * struct iwx_fw_dbg_mem_seg_tlv - configures the debug data memory segments
 *
 * @data_type: the memory segment type to record
 * @ofs: the memory segment offset
 * @len: the memory segment length, in bytes
 *
 * This parses IWX_UCODE_TLV_FW_MEM_SEG
 */
struct iwx_fw_dbg_mem_seg_tlv {
	uint32_t data_type;
	uint32_t ofs;
	uint32_t len;
} __packed;
+
/**
 * struct iwx_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
 *
 * @version: version of the TLV - currently 0
 * @monitor_mode: &enum iwx_fw_dbg_monitor_mode
 * @size_power: buffer size will be 2^(size_power + 11)
 * @base_reg: addr of the base addr register (PRPH)
 * @end_reg: addr of the end addr register (PRPH)
 * @write_ptr_reg: the addr of the reg of the write pointer
 * @wrap_count: the addr of the reg of the wrap_count
 * @base_shift: shift right of the base addr reg
 * @end_shift: shift right of the end addr reg
 * @reg_ops: array of registers operations
 *
 * This parses IWX_UCODE_TLV_FW_DBG_DEST
 *
 * NOTE(review): reg_ops uses a GNU zero-length array rather than a C99
 * flexible array member; kept as-is in case this struct is embedded
 * elsewhere (FAM structs cannot be embedded).
 */
struct iwx_fw_dbg_dest_tlv_v1 {
	uint8_t version;
	uint8_t monitor_mode;
	uint8_t size_power;
	uint8_t reserved;
	uint32_t base_reg;
	uint32_t end_reg;
	uint32_t write_ptr_reg;
	uint32_t wrap_count;
	uint8_t base_shift;
	uint8_t end_shift;
	struct iwx_fw_dbg_reg_op reg_ops[0];
} __packed;

/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
#define IWX_LDBG_M2S_BUF_SIZE_MSK	0x0fff0000
/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
#define IWX_LDBG_M2S_BUF_BA_MSK		0x00000fff
/* The smem buffer chunks are in units of 256 bits */
#define IWX_M2S_UNIT_SIZE		0x100

/*
 * Later revision of the debug destination TLV: a single cfg_reg
 * replaces the base/end register pair of v1.
 */
struct iwx_fw_dbg_dest_tlv {
	uint8_t version;
	uint8_t monitor_mode;
	uint8_t size_power;
	uint8_t reserved;
	uint32_t cfg_reg;
	uint32_t write_ptr_reg;
	uint32_t wrap_count;
	uint8_t base_shift;
	uint8_t size_shift;
	struct iwx_fw_dbg_reg_op reg_ops[0];
} __packed;

/* A host command embedded in a debug configuration TLV
 * (see struct iwx_fw_dbg_conf_tlv). Must keep data[0] (not a C99
 * flexible array member) because this struct is embedded there. */
struct iwx_fw_dbg_conf_hcmd {
	uint8_t id;
	uint8_t reserved;
	uint16_t len;
	uint8_t data[0];
} __packed;
+
/**
 * enum iwx_fw_dbg_trigger_mode - triggers functionalities
 *
 * @IWX_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
 * @IWX_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
 * @IWX_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
 *	collect only monitor data
 */
enum iwx_fw_dbg_trigger_mode {
	IWX_FW_DBG_TRIGGER_START = (1 << 0),
	IWX_FW_DBG_TRIGGER_STOP = (1 << 1),
	IWX_FW_DBG_TRIGGER_MONITOR_ONLY = (1 << 2),
};

/**
 * enum iwx_fw_dbg_trigger_flags - the flags supported by wrt triggers
 * @IWX_FW_DBG_FORCE_RESTART: force a firmware restart
 */
enum iwx_fw_dbg_trigger_flags {
	IWX_FW_DBG_FORCE_RESTART = (1 << 0),
};

/**
 * enum iwx_fw_dbg_trigger_vif_type - define the VIF type for a trigger
 * Values are not contiguous (4-7 and 11 are unused).
 * @IWX_FW_DBG_CONF_VIF_ANY: any vif type
 * @IWX_FW_DBG_CONF_VIF_IBSS: IBSS mode
 * @IWX_FW_DBG_CONF_VIF_STATION: BSS mode
 * @IWX_FW_DBG_CONF_VIF_AP: AP mode
 * @IWX_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
 * @IWX_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
 * @IWX_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
 * @IWX_FW_DBG_CONF_VIF_NAN: NAN device
 */
enum iwx_fw_dbg_trigger_vif_type {
	IWX_FW_DBG_CONF_VIF_ANY = 0,
	IWX_FW_DBG_CONF_VIF_IBSS = 1,
	IWX_FW_DBG_CONF_VIF_STATION = 2,
	IWX_FW_DBG_CONF_VIF_AP = 3,
	IWX_FW_DBG_CONF_VIF_P2P_CLIENT = 8,
	IWX_FW_DBG_CONF_VIF_P2P_GO = 9,
	IWX_FW_DBG_CONF_VIF_P2P_DEVICE = 10,
	IWX_FW_DBG_CONF_VIF_NAN = 12,
};
+
/**
 * enum iwx_fw_dbg_trigger - triggers available
 *
 * @IWX_FW_DBG_TRIGGER_USER: trigger log collection by user
 *	This should not be defined as a trigger to the driver, but a value the
 *	driver should set to indicate that the trigger was initiated by the
 *	user.
 * @IWX_FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware
 *	asserts
 * @IWX_FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
 *	missed.
 * @IWX_FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel
 *	switch.
 * @IWX_FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends
 *	a command response or a notification.
 * @IWX_FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
 * @IWX_FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
 * @IWX_FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
 *	goes below a threshold.
 * @IWX_FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
 *	detection.
 * @IWX_FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events
 *	related events.
 * @IWX_FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
 * @IWX_FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
 *	goes above a threshold.
 * @IWX_FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
 * @IWX_FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
 *	the firmware sends a tx reply.
 * @IWX_FW_DBG_TRIGGER_USER_EXTENDED: trigger log collection upon user space
 *	request.
 * @IWX_FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow
 *	timeouts
 * @IWX_FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
 *	in the driver.
 */
enum iwx_fw_dbg_trigger {
	IWX_FW_DBG_TRIGGER_INVALID = 0,
	IWX_FW_DBG_TRIGGER_USER,
	IWX_FW_DBG_TRIGGER_FW_ASSERT,
	IWX_FW_DBG_TRIGGER_MISSED_BEACONS,
	IWX_FW_DBG_TRIGGER_CHANNEL_SWITCH,
	IWX_FW_DBG_TRIGGER_FW_NOTIF,
	IWX_FW_DBG_TRIGGER_MLME,
	IWX_FW_DBG_TRIGGER_STATS,
	IWX_FW_DBG_TRIGGER_RSSI,
	IWX_FW_DBG_TRIGGER_TXQ_TIMERS,
	IWX_FW_DBG_TRIGGER_TIME_EVENT,
	IWX_FW_DBG_TRIGGER_BA,
	IWX_FW_DBG_TRIGGER_TX_LATENCY,
	IWX_FW_DBG_TRIGGER_TDLS,
	IWX_FW_DBG_TRIGGER_TX_STATUS,
	IWX_FW_DBG_TRIGGER_USER_EXTENDED,
	IWX_FW_DBG_TRIGGER_ALIVE_TIMEOUT,
	IWX_FW_DBG_TRIGGER_DRIVER,

	/* must be last */
	IWX_FW_DBG_TRIGGER_MAX,
};
+
+
/**
 * struct iwx_fw_dbg_trigger_tlv - a TLV that describes the trigger
 * @id: &enum iwx_fw_dbg_trigger
 * @vif_type: &enum iwx_fw_dbg_trigger_vif_type
 * @stop_conf_ids: bitmap of configurations this trigger relates to.
 *	if the mode is %IWX_FW_DBG_TRIGGER_STOP, then if the bit corresponding
 *	to the currently running configuration is set, the data should be
 *	collected.
 * @stop_delay: how many milliseconds to wait before collecting the data
 *	after the STOP trigger fires.
 * @mode: &enum iwx_fw_dbg_trigger_mode - can be stop / start of both
 * @start_conf_id: if mode is %IWX_FW_DBG_TRIGGER_START, this defines what
 *	configuration should be applied when the trigger kicks in.
 * @occurrences: number of occurrences. 0 means the trigger will never fire.
 * @trig_dis_ms: the time, in milliseconds, after an occurrence of this
 *	trigger in which another occurrence should be ignored.
 * @flags: &enum iwx_fw_dbg_trigger_flags
 * @data: trailing variable-length trigger-specific payload
 */
struct iwx_fw_dbg_trigger_tlv {
	uint32_t id;
	uint32_t vif_type;
	uint32_t stop_conf_ids;
	uint32_t stop_delay;
	uint8_t mode;
	uint8_t start_conf_id;
	uint16_t occurrences;
	uint16_t trig_dis_ms;
	uint8_t flags;
	uint8_t reserved[5];

	uint8_t data[0];
} __packed;

#define IWX_FW_DBG_START_FROM_ALIVE	0
#define IWX_FW_DBG_CONF_MAX		32
#define IWX_FW_DBG_INVALID		0xff
+
/**
 * struct iwx_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
 * (members documented in struct declaration order)
 * @stop_consec_missed_bcon: stop recording if threshold is crossed.
 * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
 * @reserved2: reserved
 * @start_consec_missed_bcon: start recording if threshold is crossed.
 * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
 * @reserved1: reserved
 */
struct iwx_fw_dbg_trigger_missed_bcon {
	uint32_t stop_consec_missed_bcon;
	uint32_t stop_consec_missed_bcon_since_rx;
	uint32_t reserved2[2];
	uint32_t start_consec_missed_bcon;
	uint32_t start_consec_missed_bcon_since_rx;
	uint32_t reserved1[2];
} __packed;
+
/**
 * struct iwx_fw_dbg_trigger_cmd - configures trigger for messages from FW.
 * @cmds: the list of commands to trigger the collection on
 */
struct iwx_fw_dbg_trigger_cmd {
	struct cmd {
		uint8_t cmd_id;
		uint8_t group_id;
	} __packed cmds[16];
} __packed;

/**
 * struct iwx_fw_dbg_trigger_stats - configures trigger for statistics
 * @stop_offset: the offset of the value to be monitored
 * @stop_threshold: the threshold above which to collect
 * @start_offset: the offset of the value to be monitored
 * @start_threshold: the threshold above which to start recording
 */
struct iwx_fw_dbg_trigger_stats {
	uint32_t stop_offset;
	uint32_t stop_threshold;
	uint32_t start_offset;
	uint32_t start_threshold;
} __packed;
+
/**
 * struct iwx_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
 * @rssi: RSSI value to trigger at
 */
struct iwx_fw_dbg_trigger_low_rssi {
	uint32_t rssi;
} __packed;

/**
 * struct iwx_fw_dbg_trigger_mlme - configures trigger for mlme events
 * @stop_auth_denied: number of denied authentications before collecting
 * @stop_auth_timeout: number of authentication timeouts before collecting
 * @stop_rx_deauth: number of Rx deauth frames before collecting
 * @stop_tx_deauth: number of Tx deauth frames before collecting
 * @stop_assoc_denied: number of denied associations before collecting
 * @stop_assoc_timeout: number of association timeouts before collecting
 * @stop_connection_loss: number of connection losses before collecting
 * @start_auth_denied: number of denied authentications to start recording
 * @start_auth_timeout: number of authentication timeouts to start recording
 * @start_rx_deauth: number of Rx deauth frames to start recording
 * @start_tx_deauth: number of Tx deauth frames to start recording
 * @start_assoc_denied: number of denied associations to start recording
 * @start_assoc_timeout: number of association timeouts to start recording
 * @start_connection_loss: number of connection losses to start recording
 */
struct iwx_fw_dbg_trigger_mlme {
	uint8_t stop_auth_denied;
	uint8_t stop_auth_timeout;
	uint8_t stop_rx_deauth;
	uint8_t stop_tx_deauth;

	uint8_t stop_assoc_denied;
	uint8_t stop_assoc_timeout;
	uint8_t stop_connection_loss;
	uint8_t reserved;

	uint8_t start_auth_denied;
	uint8_t start_auth_timeout;
	uint8_t start_rx_deauth;
	uint8_t start_tx_deauth;

	uint8_t start_assoc_denied;
	uint8_t start_assoc_timeout;
	uint8_t start_connection_loss;
	uint8_t reserved2;
} __packed;
+
/**
 * struct iwx_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
 * @command_queue: timeout for the command queue in ms
 * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
 * @softap: timeout for the queues of a softAP in ms
 * @p2p_go: timeout for the queues of a P2P GO in ms
 * @p2p_client: timeout for the queues of a P2P client in ms
 * @p2p_device: timeout for the queues of a P2P device in ms
 * @ibss: timeout for the queues of an IBSS in ms
 * @tdls: timeout for the queues of a TDLS station in ms
 */
struct iwx_fw_dbg_trigger_txq_timer {
	uint32_t command_queue;
	uint32_t bss;
	uint32_t softap;
	uint32_t p2p_go;
	uint32_t p2p_client;
	uint32_t p2p_device;
	uint32_t ibss;
	uint32_t tdls;
	uint32_t reserved[4];
} __packed;

/**
 * struct iwx_fw_dbg_trigger_time_event - configures a time event trigger
 * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a
 *	trigger each time a time event notification that relates to time event
 *	id with one of the actions in the bitmap is received and
 *	BIT(notif->status) is set in status_bitmap.
 *
 */
struct iwx_fw_dbg_trigger_time_event {
	struct {
		uint32_t id;
		uint32_t action_bitmap;
		uint32_t status_bitmap;
	} __packed time_events[16];
} __packed;
+
/**
 * struct iwx_fw_dbg_trigger_ba - configures BlockAck related trigger
 * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
 *	when an Rx BlockAck session is started.
 * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
 *	when an Rx BlockAck session is stopped.
 * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
 *	when a Tx BlockAck session is started.
 * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
 *	when a Tx BlockAck session is stopped.
 * @rx_bar: tid bitmap to configure on what tid the trigger should occur
 *	when a BAR is received (for a Tx BlockAck session).
 * @tx_bar: tid bitmap to configure on what tid the trigger should occur
 *	when a BAR is sent (for an Rx BlockAck session).
 * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
 *	when a frame times out in the reordering buffer.
 */
struct iwx_fw_dbg_trigger_ba {
	uint16_t rx_ba_start;
	uint16_t rx_ba_stop;
	uint16_t tx_ba_start;
	uint16_t tx_ba_stop;
	uint16_t rx_bar;
	uint16_t tx_bar;
	uint16_t frame_timeout;
} __packed;

/**
 * struct iwx_fw_dbg_trigger_tx_latency - configures tx latency related trigger
 * @thrshold: the wanted threshold (sic: field name kept as in the
 *	original driver source — presumably inherited from iwlwifi)
 * @tid_bitmap: the tid to apply the threshold on
 * @mode: recording mode (internal buffer or continuous recording)
 * @window: the size of the window before collecting.
 * @reserved: reserved.
 */
struct iwx_fw_dbg_trigger_tx_latency {
	uint32_t thrshold;
	uint16_t tid_bitmap;
	uint16_t mode;
	uint32_t window;
	uint32_t reserved[4];
} __packed;
+
/**
 * struct iwx_fw_dbg_trigger_tdls - configures trigger for TDLS events.
 * @action_bitmap: the TDLS action to trigger the collection upon
 * @peer_mode: trigger on specific peer or all
 * @peer: the TDLS peer to trigger the collection on
 */
struct iwx_fw_dbg_trigger_tdls {
	uint8_t action_bitmap;
	uint8_t peer_mode;
	uint8_t peer[ETHER_ADDR_LEN];
	uint8_t reserved[4];
} __packed;

/**
 * struct iwx_fw_dbg_trigger_tx_status - configures trigger for tx response
 * status.
 * @statuses: the list of statuses to trigger the collection on
 */
struct iwx_fw_dbg_trigger_tx_status {
	struct tx_status {
		uint8_t status;
		uint8_t reserved[3];
	} __packed statuses[16];
	uint32_t reserved[2];
} __packed;

/**
 * struct iwx_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
 * @id: conf id
 * @usniffer: should the uSniffer image be used
 * @num_of_hcmds: how many HCMDs to send are present here
 * @hcmd: a variable length host command to be sent to apply the configuration.
 *	If there is more than one HCMD to send, they will appear one after the
 *	other and be sent in the order that they appear in.
 * This parses IWX_UCODE_TLV_FW_DBG_CONF. The user can add up to
 * %IWX_FW_DBG_CONF_MAX configurations per run.
 */
struct iwx_fw_dbg_conf_tlv {
	uint8_t id;
	uint8_t usniffer;
	uint8_t reserved;
	uint8_t num_of_hcmds;
	struct iwx_fw_dbg_conf_hcmd hcmd;
} __packed;

#define IWX_FW_CMD_VER_UNKNOWN 99

/**
 * struct iwx_fw_cmd_version - firmware command version entry
 * @cmd: command ID
 * @group: group ID
 * @cmd_ver: command version
 * @notif_ver: notification version
 */
struct iwx_fw_cmd_version {
	uint8_t cmd;
	uint8_t group;
	uint8_t cmd_ver;
	uint8_t notif_ver;
} __packed;
+
/* Common PHY, MAC and Bindings definitions */

#define IWX_MAX_MACS_IN_BINDING	(3)
#define IWX_MAX_BINDINGS	(4)
#define IWX_AUX_BINDING_INDEX	(3)
#define IWX_MAX_PHYS		(4)

/* Used to extract ID and color from the context dword */
#define IWX_FW_CTXT_ID_POS	(0)
#define IWX_FW_CTXT_ID_MSK	(0xff << IWX_FW_CTXT_ID_POS)
#define IWX_FW_CTXT_COLOR_POS	(8)
#define IWX_FW_CTXT_COLOR_MSK	(0xff << IWX_FW_CTXT_COLOR_POS)
#define IWX_FW_CTXT_INVALID	(0xffffffff)

/*
 * Compose the context id-and-color dword from an ID and a color.
 * Arguments are parenthesized so that expression arguments expand safely.
 */
#define IWX_FW_CMD_ID_AND_COLOR(_id, _color)	\
	(((_id) << IWX_FW_CTXT_ID_POS) | ((_color) << IWX_FW_CTXT_COLOR_POS))

/* Possible actions on PHYs, MACs and Bindings */
#define IWX_FW_CTXT_ACTION_STUB		0
#define IWX_FW_CTXT_ACTION_ADD		1
#define IWX_FW_CTXT_ACTION_MODIFY	2
#define IWX_FW_CTXT_ACTION_REMOVE	3
#define IWX_FW_CTXT_ACTION_NUM		4
/* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
/* Time Events */

/* Time Event types, according to MAC type.
 * Note: the IDs form a single global numbering across all MAC types. */

/* BSS Station Events */
#define IWX_TE_BSS_STA_AGGRESSIVE_ASSOC	0
#define IWX_TE_BSS_STA_ASSOC		1
#define IWX_TE_BSS_EAP_DHCP_PROT	2
#define IWX_TE_BSS_QUIET_PERIOD		3

/* P2P Device Events */
#define IWX_TE_P2P_DEVICE_DISCOVERABLE	4
#define IWX_TE_P2P_DEVICE_LISTEN	5
#define IWX_TE_P2P_DEVICE_ACTION_SCAN	6
#define IWX_TE_P2P_DEVICE_FULL_SCAN	7

/* P2P Client Events */
#define IWX_TE_P2P_CLIENT_AGGRESSIVE_ASSOC	8
#define IWX_TE_P2P_CLIENT_ASSOC			9
#define IWX_TE_P2P_CLIENT_QUIET_PERIOD		10

/* P2P GO Events */
#define IWX_TE_P2P_GO_ASSOC_PROT	11
#define IWX_TE_P2P_GO_REPETITIVE_NOA	12
#define IWX_TE_P2P_GO_CT_WINDOW		13

/* WiDi Sync Events */
#define IWX_TE_WIDI_TX_SYNC	14
+
+/* Time event - defines for command API */
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+ /**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
 * Of course the API provides various options that allow covering the
 * parameters of the flow.
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If yes what is the maximal number of chunks?
+ * etc...
+ */
+
/*
 * @IWX_TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
 * @IWX_TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
 *	the first fragment is scheduled.
 * @IWX_TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
 *	the first 2 fragments are scheduled.
 * @IWX_TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
 *	number of fragments are valid.
 *
 * Other than the constants defined above, specifying a fragmentation value 'x'
 * means that the event can be fragmented but only the first 'x' will be
 * scheduled.
 */
#define IWX_TE_V2_FRAG_NONE	0
#define IWX_TE_V2_FRAG_SINGLE	1
#define IWX_TE_V2_FRAG_DUAL	2
#define IWX_TE_V2_FRAG_MAX	0xfe
#define IWX_TE_V2_FRAG_ENDLESS	0xff

/* Repeat the time event endlessly (until removed) */
#define IWX_TE_V2_REPEAT_ENDLESS	0xff
/* If a Time Event has bounded repetitions, this is the maximal value */
#define IWX_TE_V2_REPEAT_MAX	0xfe

#define IWX_TE_V2_PLACEMENT_POS	12
#define IWX_TE_V2_ABSENCE_POS	15

/* Time event policy values
 * A notification (both event and fragment) includes a status indicating
 * whether the FW was able to schedule the event or not. For fragment
 * start/end notification the status is always success. There is no start/end
 * fragment notification for monolithic events.
 *
 * @IWX_TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
 * @IWX_TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
 * @IWX_TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
 * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
 * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
 * @IWX_TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
 * @IWX_TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
 * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
 * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
 * @IWX_TE_V2_DEP_OTHER: depends on another time event
 * @IWX_TE_V2_DEP_TSF: depends on a specific time
 * @IWX_TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the
 *	same MAC
 * @IWX_TE_V2_ABSENCE: are we present or absent during the Time Event.
 */
#define IWX_TE_V2_DEFAULT_POLICY	0x0

/* notifications (event start/stop, fragment start/stop) */
#define IWX_TE_V2_NOTIF_HOST_EVENT_START	(1 << 0)
#define IWX_TE_V2_NOTIF_HOST_EVENT_END		(1 << 1)
#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_START	(1 << 2)
#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_END	(1 << 3)

#define IWX_TE_V2_NOTIF_HOST_FRAG_START		(1 << 4)
#define IWX_TE_V2_NOTIF_HOST_FRAG_END		(1 << 5)
#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_START	(1 << 6)
#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_END	(1 << 7)
/* sic: "T2", not "TE" — name apparently inherited from the original
 * driver source; confirm against iwlwifi before renaming */
#define IWX_T2_V2_START_IMMEDIATELY		(1 << 11)

#define IWX_TE_V2_NOTIF_MSK	0xff

/* placement characteristics */
#define IWX_TE_V2_DEP_OTHER		(1 << IWX_TE_V2_PLACEMENT_POS)
#define IWX_TE_V2_DEP_TSF		(1 << (IWX_TE_V2_PLACEMENT_POS + 1))
#define IWX_TE_V2_EVENT_SOCIOPATHIC	(1 << (IWX_TE_V2_PLACEMENT_POS + 2))

/* are we present or absent during the Time Event. */
#define IWX_TE_V2_ABSENCE	(1 << IWX_TE_V2_ABSENCE_POS)
+
/**
 * struct iwx_time_event_cmd - configuring Time Events
 * with struct IWX_MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
 * with version 1. determined by IWX_UCODE_TLV_FLAGS)
 * ( IWX_TIME_EVENT_CMD = 0x29 )
 * @id_and_color: ID and color of the relevant MAC
 * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
 * @id: this field has two meanings, depending on the action:
 *	If the action is ADD, then it means the type of event to add.
 *	For all other actions it is the unique event ID assigned when the
 *	event was added by the FW.
 * @apply_time: When to start the Time Event (in GP2)
 * @max_delay: maximum delay to event's start (apply time), in TU
 * @depends_on: the unique ID of the event we depend on (if any)
 * @interval: interval between repetitions, in TU
 * @duration: duration of event in TU
 * @repeat: how many repetitions to do, can be IWX_TE_V2_REPEAT_ENDLESS
 * @max_frags: maximal number of fragments the Time Event can be divided to
 * @policy: defines whether uCode shall notify the host or other uCode modules
 *	on event and/or fragment start and/or end
 *	using one of IWX_TE_V2_DEFAULT_POLICY, IWX_TE_V2_DEP_OTHER,
 *	IWX_TE_V2_DEP_TSF, IWX_TE_V2_EVENT_SOCIOPATHIC
 *	using IWX_TE_V2_ABSENCE and using IWX_TE_V2_NOTIF_*
 */
struct iwx_time_event_cmd {
	/* COMMON_INDEX_HDR_API_S_VER_1 */
	uint32_t id_and_color;
	uint32_t action;
	uint32_t id;
	/* IWX_MAC_TIME_EVENT_DATA_API_S_VER_2 */
	uint32_t apply_time;
	uint32_t max_delay;
	uint32_t depends_on;
	uint32_t interval;
	uint32_t duration;
	uint8_t repeat;
	uint8_t max_frags;
	uint16_t policy;
} __packed; /* IWX_MAC_TIME_EVENT_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_time_event_resp - response structure to iwx_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwx_time_event_resp {
+ uint32_t status;
+ uint32_t id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+} __packed; /* IWX_MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwx_time_event_notif - notifications of time event start/stop
+ * ( IWX_TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of IWX_TE_NOTIF_START or IWX_TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwx_time_event_notif {
+ uint32_t timestamp;
+ uint32_t session_id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t status;
+} __packed; /* IWX_MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwx_binding_cmd - configuring bindings
+ * ( IWX_BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
+ */
+struct iwx_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_BINDING_DATA_API_S_VER_1 */
+ uint32_t macs[IWX_MAX_MACS_IN_BINDING];
+ uint32_t phy;
+ uint32_t lmac_id;
+} __packed; /* IWX_BINDING_CMD_API_S_VER_2 */
+
+#define IWX_LMAC_24G_INDEX 0
+#define IWX_LMAC_5G_INDEX 1
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWX_MAX_QUOTA 128
+
+/**
+ * struct iwx_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwx_time_quota_data {
+ uint32_t id_and_color;
+ uint32_t quota;
+ uint32_t max_duration;
+} __packed; /* IWX_TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_time_quota_cmd - configuration of time quota between bindings
+ * ( IWX_TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwx_time_quota_cmd {
+ struct iwx_time_quota_data quotas[IWX_MAX_BINDINGS];
+} __packed; /* IWX_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define IWX_PHY_BAND_5 (0)
+#define IWX_PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define IWX_PHY_VHT_CHANNEL_MODE20 (0x0)
+#define IWX_PHY_VHT_CHANNEL_MODE40 (0x1)
+#define IWX_PHY_VHT_CHANNEL_MODE80 (0x2)
+#define IWX_PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define IWX_PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define IWX_PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define IWX_PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define IWX_PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define IWX_PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define IWX_PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define IWX_PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define IWX_PHY_VHT_CTRL_POS_4_ABOVE (0x7)
+
+/*
+ * @band: IWX_PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwx_fw_channel_info_v1 {
+ uint8_t band;
+ uint8_t channel;
+ uint8_t width;
+ uint8_t ctrl_pos;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwx_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: control channel position, PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwx_fw_channel_info {
+ uint32_t channel;
+ uint8_t band;
+ uint8_t width;
+ uint8_t ctrl_pos;
+ uint8_t reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
+
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define IWX_PHY_RX_CHAIN_VALID_POS (1)
+#define IWX_PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_VALID_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_SEL_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define IWX_PHY_RX_CHAIN_CNT_POS (10)
+#define IWX_PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_MIMO_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_MIMO_FORCE_POS)
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define IWX_NUM_PHY_CTX 3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwx_phy_context_cmd - config of the PHY context
+ * ( IWX_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @ci: channel configuration, see struct iwx_fw_channel_info
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwx_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwx_fw_channel_info ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWX_PHY_CONTEXT_CMD_API_VER_1 */
+
+#define IWX_RX_INFO_PHY_CNT 8
+#define IWX_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWX_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWX_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWX_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWX_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWX_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWX_RX_INFO_ENERGY_ANT_C_POS 16
+
+#define IWX_RX_INFO_AGC_IDX 1
+#define IWX_RX_INFO_RSSI_AB_IDX 2
+#define IWX_OFDM_AGC_A_MSK 0x0000007f
+#define IWX_OFDM_AGC_A_POS 0
+#define IWX_OFDM_AGC_B_MSK 0x00003f80
+#define IWX_OFDM_AGC_B_POS 7
+#define IWX_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWX_OFDM_AGC_CODE_POS 20
+#define IWX_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWX_OFDM_RSSI_A_POS 0
+#define IWX_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWX_OFDM_RSSI_ALLBAND_A_POS 8
+#define IWX_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWX_OFDM_RSSI_B_POS 16
+#define IWX_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWX_OFDM_RSSI_ALLBAND_B_POS 24
+
+/**
+ * struct iwx_rx_phy_info - phy info
+ * (IWX_REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy_buf: for various implementations of non_cfg_phy
+ * @rate_n_flags: IWX_RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwx_rx_phy_info {
+ uint8_t non_cfg_phy_cnt;
+ uint8_t cfg_phy_cnt;
+ uint8_t stat_id;
+ uint8_t reserved1;
+ uint32_t system_timestamp;
+ uint64_t timestamp;
+ uint32_t beacon_time_stamp;
+ uint16_t phy_flags;
+#define IWX_PHY_INFO_FLAG_SHPREAMBLE (1 << 2)
+ uint16_t channel;
+ uint32_t non_cfg_phy[IWX_RX_INFO_PHY_CNT];
+ uint32_t rate_n_flags;
+ uint32_t byte_count;
+ uint16_t mac_active_msk;
+ uint16_t frame_time;
+} __packed;
+
+struct iwx_rx_mpdu_res_start {
+ uint16_t byte_count;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * Values to parse %iwx_rx_phy_info phy_flags
+ * @IWX_RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @IWX_RX_RES_PHY_FLAGS_MOD_CCK:
+ * @IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @IWX_RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @IWX_RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @IWX_RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+#define IWX_RX_RES_PHY_FLAGS_BAND_24 (1 << 0)
+#define IWX_RX_RES_PHY_FLAGS_MOD_CCK (1 << 1)
+#define IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE (1 << 2)
+#define IWX_RX_RES_PHY_FLAGS_NARROW_BAND (1 << 3)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA (0x7 << 4)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define IWX_RX_RES_PHY_FLAGS_AGG (1 << 7)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_HT (1 << 8)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_GF (1 << 9)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_VHT (1 << 10)
+
+/**
+ * Values written by fw for each Rx packet
+ * @IWX_RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @IWX_RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_VALID:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @IWX_RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @IWX_RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @IWX_RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @IWX_RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_RRF_KILL:
+ * @IWX_RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+#define IWX_RX_MPDU_RES_STATUS_CRC_OK (1 << 0)
+#define IWX_RX_MPDU_RES_STATUS_OVERRUN_OK (1 << 1)
+#define IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND (1 << 2)
+#define IWX_RX_MPDU_RES_STATUS_KEY_VALID (1 << 3)
+#define IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK (1 << 4)
+#define IWX_RX_MPDU_RES_STATUS_ICV_OK (1 << 5)
+#define IWX_RX_MPDU_RES_STATUS_MIC_OK (1 << 6)
+/* NB: bit 7 is deliberately shared by the next two flags; per the
+ * comment block above, which meaning applies depends on the cipher
+ * in use (TTAK_OK for TKIP, MNG_FRAME_REPLAY_ERR for CCM/CCM_CMAC). */
+#define IWX_RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
+#define IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR (1 << 7)
+/* bits 8-10 encode the encryption algorithm; value (5 << 8) is unused */
+#define IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC (0 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC (1 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC (2 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC (3 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_EXT_ENC (4 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC (6 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_DEC_DONE (1 << 11)
+#define IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP (1 << 12)
+#define IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP (1 << 13)
+#define IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT (1 << 14)
+#define IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME (1 << 15)
+#define IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK (0x3F0000)
+#define IWX_RX_MPDU_RES_STATUS_STA_ID_MSK (0x1f000000)
+#define IWX_RX_MPDU_RES_STATUS_RRF_KILL (1 << 29)
+#define IWX_RX_MPDU_RES_STATUS_FILTERING_MSK (0xc00000)
+#define IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK (0xc0000000)
+
+/* values for iwx_rx_mpdu_desc.mac_flags1 */
+#define IWX_RX_MPDU_MFLG1_ADDRTYPE_MASK 0x03
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK 0xf0
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT 3
+
+/* values for iwx_rx_mpdu_desc.mac_flags2 */
+#define IWX_RX_MPDU_MFLG2_HDR_LEN_MASK 0x1f
+#define IWX_RX_MPDU_MFLG2_PAD 0x20
+#define IWX_RX_MPDU_MFLG2_AMSDU 0x40
+
+/* values for iwx_rx_mpdu_desc.phy_info */
+#define IWX_RX_MPDU_PHY_AMPDU (1 << 5)
+#define IWX_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6)
+#define IWX_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7)
+/* NB: bit 7 is reused by the two defines above/below; presumably
+ * SHORT_PREAMBLE applies to CCK frames and NCCK_ADDTL_NTFY to
+ * non-CCK — TODO confirm against iwlwifi's rx API header. */
+#define IWX_RX_MPDU_PHY_NCCK_ADDTL_NTFY (1 << 7)
+#define IWX_RX_MPDU_PHY_TSF_OVERLOAD (1 << 8)
+
+struct iwx_rx_mpdu_desc_v1 {
+ union {
+ uint32_t rss_hash;
+ uint32_t phy_data2;
+ };
+ union {
+ uint32_t filter_match;
+ uint32_t phy_data3;
+ };
+ uint32_t rate_n_flags;
+ uint8_t energy_a;
+ uint8_t energy_b;
+ uint8_t channel;
+ uint8_t mac_context;
+ uint32_t gp2_on_air_rise;
+ union {
+ uint64_t tsf_on_air_rise;
+ struct {
+ uint32_t phy_data0;
+ uint32_t phy_data1;
+ };
+ };
+} __packed;
+
+struct iwx_rx_mpdu_desc {
+ uint16_t mpdu_len;
+ uint8_t mac_flags1;
+ uint8_t mac_flags2;
+ uint8_t amsdu_info;
+ uint16_t phy_info;
+ uint8_t mac_phy_idx;
+ uint16_t raw_csum;
+ union {
+ uint16_t l3l4_flags;
+ uint16_t phy_data4;
+ };
+ uint16_t status;
+ uint8_t hash_filter;
+ uint8_t sta_id_flags;
+ uint32_t reorder_data;
+ struct iwx_rx_mpdu_desc_v1 v1;
+} __packed;
+
+/**
+ * struct iwx_radio_version_notif - information on the radio version
+ * ( IWX_RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwx_radio_version_notif {
+ uint32_t radio_flavor;
+ uint32_t radio_step;
+ uint32_t radio_dash;
+} __packed; /* IWX_RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+#define IWX_CARD_ENABLED 0x00
+#define IWX_HW_CARD_DISABLED 0x01
+#define IWX_SW_CARD_DISABLED 0x02
+#define IWX_CT_KILL_CARD_DISABLED 0x04
+#define IWX_HALT_CARD_DISABLED 0x08
+#define IWX_CARD_DISABLED_MSK 0x0f
+#define IWX_CARD_IS_RX_ON 0x10
+
+/**
+ * struct iwx_card_state_notif - card state (rfkill) information
+ * ( IWX_CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwx_card_state_flags
+ */
+struct iwx_card_state_notif {
+ uint32_t flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwx_missed_beacons_notif - information on missed beacons
+ * ( IWX_MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons:
+ * @num_recvd_beacons:
+ */
+struct iwx_missed_beacons_notif {
+ uint32_t mac_id;
+ uint32_t consec_missed_beacons_since_last_rx;
+ uint32_t consec_missed_beacons;
+ uint32_t num_expected_beacons;
+ uint32_t num_recvd_beacons;
+} __packed; /* IWX_MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwx_mfuart_load_notif - mfuart image version & status
+ * ( IWX_MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwx_mfuart_load_notif {
+ uint32_t installed_ver;
+ uint32_t external_ver;
+ uint32_t status;
+ uint32_t duration;
+} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
+
+/**
+ * struct iwx_set_calib_default_cmd - set default value for calibration.
+ * ( IWX_SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: of data
+ * @data: the value to set for the calibration result
+ */
+struct iwx_set_calib_default_cmd {
+ uint16_t calib_index;
+ uint16_t length;
+ uint8_t data[0];
+} __packed; /* IWX_PHY_CALIB_OVERRIDE_VALUES_S */
+
+#define IWX_MAX_PORT_ID_NUM 2
+#define IWX_MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwx_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ * to identify network interface adopted in host-device IF.
+ * It is used by FW as index in array of addresses. This array has
+ * IWX_MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Place holder for array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwx_mcast_filter_cmd {
+ uint8_t filter_own;
+ uint8_t port_id;
+ uint8_t count;
+ uint8_t pass_all;
+ uint8_t bssid[6];
+ uint8_t reserved[2];
+ uint8_t addr_list[0];
+} __packed; /* IWX_MCAST_FILTERING_CMD_API_S_VER_1 */
+
+struct iwx_statistics_dbg {
+ uint32_t burst_check;
+ uint32_t burst_count;
+ uint32_t wait_for_silence_timeout_cnt;
+ uint32_t reserved[3];
+} __packed; /* IWX_STATISTICS_DEBUG_API_S_VER_2 */
+
+struct iwx_statistics_div {
+ uint32_t tx_on_a;
+ uint32_t tx_on_b;
+ uint32_t exec_time;
+ uint32_t probe_time;
+ uint32_t rssi_ant;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct iwx_statistics_bt_activity {
+ uint32_t hi_priority_tx_req_cnt;
+ uint32_t hi_priority_tx_denied_cnt;
+ uint32_t lo_priority_tx_req_cnt;
+ uint32_t lo_priority_tx_denied_cnt;
+ uint32_t hi_priority_rx_req_cnt;
+ uint32_t hi_priority_rx_denied_cnt;
+ uint32_t lo_priority_rx_req_cnt;
+ uint32_t lo_priority_rx_denied_cnt;
+} __packed; /* IWX_STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct iwx_statistics_general_common {
+ uint32_t radio_temperature;
+ struct iwx_statistics_dbg dbg;
+ uint32_t sleep_time;
+ uint32_t slots_out;
+ uint32_t slots_idle;
+ uint32_t ttl_timestamp;
+ struct iwx_statistics_div slow_div;
+ uint32_t rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ uint32_t num_of_sos_states;
+ uint32_t beacon_filtered;
+ uint32_t missed_beacons;
+ uint8_t beacon_filter_average_energy;
+ uint8_t beacon_filter_reason;
+ uint8_t beacon_filter_current_energy;
+ uint8_t beacon_filter_reserved;
+ uint32_t beacon_filter_delta_time;
+ struct iwx_statistics_bt_activity bt_activity;
+ uint64_t rx_time;
+ uint64_t on_time_rf;
+ uint64_t on_time_scan;
+ uint64_t tx_time;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx_non_phy {
+ uint32_t bogus_cts; /* CTS received when not expecting CTS */
+ uint32_t bogus_ack; /* ACK received when not expecting ACK */
+ uint32_t non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ uint32_t filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ uint32_t non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ uint32_t channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ uint32_t num_missed_bcon; /* number of missed beacons */
+ uint32_t adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ uint32_t ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ uint32_t beacon_silence_rssi[3];/* RSSI silence after beacon frame */
+ uint32_t interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ uint32_t channel_load; /* counts RX Enable time in uSec */
+ uint32_t dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ uint32_t beacon_rssi_a;
+ uint32_t beacon_rssi_b;
+ uint32_t beacon_rssi_c;
+ uint32_t beacon_energy_a;
+ uint32_t beacon_energy_b;
+ uint32_t beacon_energy_c;
+ uint32_t num_bt_kills;
+ uint32_t mac_id;
+ uint32_t directed_data_mpdu;
+} __packed; /* IWX_STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct iwx_statistics_rx_phy {
+ uint32_t ina_cnt;
+ uint32_t fina_cnt;
+ uint32_t plcp_err;
+ uint32_t crc32_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t false_alarm_cnt;
+ uint32_t fina_sync_err_cnt;
+ uint32_t sfd_timeout;
+ uint32_t fina_timeout;
+ uint32_t unresponded_rts;
+ uint32_t rxe_frame_limit_overrun;
+ uint32_t sent_ack_cnt;
+ uint32_t sent_cts_cnt;
+ uint32_t sent_ba_rsp_cnt;
+ uint32_t dsp_self_kill;
+ uint32_t mh_format_err;
+ uint32_t re_acq_main_rssi_sum;
+ uint32_t reserved;
+} __packed; /* IWX_STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct iwx_statistics_rx_ht_phy {
+ uint32_t plcp_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t crc32_err;
+ uint32_t mh_format_err;
+ uint32_t agg_crc32_good;
+ uint32_t agg_mpdu_cnt;
+ uint32_t agg_cnt;
+ uint32_t unsupport_mcs;
+} __packed; /* IWX_STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define IWX_MAC_INDEX_AUX 4
+#define IWX_MAC_INDEX_MIN_DRIVER 0
+#define IWX_NUM_MAC_INDEX_DRIVER IWX_MAC_INDEX_AUX
+
+#define IWX_STATION_COUNT 16
+
+#define IWX_MAX_CHAINS 3
+
+struct iwx_statistics_tx_non_phy_agg {
+ uint32_t ba_timeout;
+ uint32_t ba_reschedule_frames;
+ uint32_t scd_query_agg_frame_cnt;
+ uint32_t scd_query_no_agg;
+ uint32_t scd_query_agg;
+ uint32_t scd_query_mismatch;
+ uint32_t frame_not_ready;
+ uint32_t underrun;
+ uint32_t bt_prio_kill;
+ uint32_t rx_ba_rsp_cnt;
+ int8_t txpower[IWX_MAX_CHAINS];
+ int8_t reserved;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct iwx_statistics_tx_channel_width {
+ uint32_t ext_cca_narrow_ch20[1];
+ uint32_t ext_cca_narrow_ch40[2];
+ uint32_t ext_cca_narrow_ch80[3];
+ uint32_t ext_cca_narrow_ch160[4];
+ uint32_t last_tx_ch_width_indx;
+ uint32_t rx_detected_per_ch_width[4];
+ uint32_t success_per_ch_width[4];
+ uint32_t fail_per_ch_width[4];
+/* NOTE(review): unlike the sibling statistics structs this one is not
+ * marked __packed; all members are uint32_t so the layout should be
+ * identical regardless — confirm against iwlwifi's statistics header. */
+}; /* IWX_STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct iwx_statistics_tx {
+ uint32_t preamble_cnt;
+ uint32_t rx_detected_cnt;
+ uint32_t bt_prio_defer_cnt;
+ uint32_t bt_prio_kill_cnt;
+ uint32_t few_bytes_cnt;
+ uint32_t cts_timeout;
+ uint32_t ack_timeout;
+ uint32_t expected_ack_cnt;
+ uint32_t actual_ack_cnt;
+ uint32_t dump_msdu_cnt;
+ uint32_t burst_abort_next_frame_mismatch_cnt;
+ uint32_t burst_abort_missing_next_frame_cnt;
+ uint32_t cts_timeout_collision;
+ uint32_t ack_or_ba_timeout_collision;
+ struct iwx_statistics_tx_non_phy_agg agg;
+ struct iwx_statistics_tx_channel_width channel_width;
+} __packed; /* IWX_STATISTICS_TX_API_S_VER_4 */
+
+struct iwx_statistics_general {
+ struct iwx_statistics_general_common common;
+ uint32_t beacon_counter[IWX_MAC_INDEX_AUX];
+ uint8_t beacon_average_energy[IWX_MAC_INDEX_AUX];
+ uint8_t reserved[8 - IWX_MAC_INDEX_AUX];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx {
+ struct iwx_statistics_rx_phy ofdm;
+ struct iwx_statistics_rx_phy cck;
+ struct iwx_statistics_rx_non_phy general;
+ struct iwx_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* IWX_STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * IWX_STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * IWX_REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues IWX_REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+/**
+ * struct iwx_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct iwx_statistics_load {
+ uint32_t air_time[IWX_MAC_INDEX_AUX];
+ uint32_t byte_count[IWX_MAC_INDEX_AUX];
+ uint32_t pkt_count[IWX_MAC_INDEX_AUX];
+ uint8_t avg_energy[IWX_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
+
+struct iwx_notif_statistics {
+ uint32_t flag;
+ struct iwx_statistics_rx rx;
+ struct iwx_statistics_tx tx;
+ struct iwx_statistics_general general;
+ struct iwx_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
+
+
+/**
+ * flags used in statistics notification
+ * @IWX_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+#define IWX_STATISTICS_REPLY_FLG_CLEAR 0x01
+
+/**
+ * flags used in statistics command
+ * @IWX_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ * that's sent after this command
+ * @IWX_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ * notifications
+ */
+#define IWX_STATISTICS_FLG_CLEAR 0x01
+#define IWX_STATISTICS_FLG_DISABLE_NOTIF 0x02
+
+/**
+ * struct iwx_statistics_cmd - statistics config command
+ * @flags: IWX_STATISTICS_* flags
+ */
+struct iwx_statistics_cmd {
+ uint32_t flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
+
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+#define IWX_SF_LONG_DELAY_ON 0 /* should never be called by driver */
+#define IWX_SF_FULL_ON 1
+#define IWX_SF_UNINIT 2
+#define IWX_SF_INIT_OFF 3
+#define IWX_SF_HW_NUM_STATES 4
+
+/* Smart Fifo possible scenario */
+#define IWX_SF_SCENARIO_SINGLE_UNICAST 0
+#define IWX_SF_SCENARIO_AGG_UNICAST 1
+#define IWX_SF_SCENARIO_MULTICAST 2
+#define IWX_SF_SCENARIO_BA_RESP 3
+#define IWX_SF_SCENARIO_TX_RESP 4
+#define IWX_SF_NUM_SCENARIO 5
+
+#define IWX_SF_TRANSIENT_STATES_NUMBER 2 /* IWX_SF_LONG_DELAY_ON and IWX_SF_FULL_ON */
+#define IWX_SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define IWX_SF_W_MARK_SISO 4096
+#define IWX_SF_W_MARK_MIMO2 8192
+#define IWX_SF_W_MARK_MIMO3 6144
+#define IWX_SF_W_MARK_LEGACY 4096
+#define IWX_SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define IWX_SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define IWX_SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+#define IWX_SF_CFG_DUMMY_NOTIF_OFF (1 << 16)
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in enum %iwx_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwx_sf_cfg_cmd {
+ uint32_t state;
+ uint32_t watermark[IWX_SF_TRANSIENT_STATES_NUMBER];
+ uint32_t long_delay_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+ uint32_t full_on_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+} __packed; /* IWX_SF_CFG_API_S_VER_2 */
+
+#define IWX_AC_BK 0
+#define IWX_AC_BE 1
+#define IWX_AC_VI 2
+#define IWX_AC_VO 3
+#define IWX_AC_NUM 4
+
+/**
+ * MAC context flags
+ * @IWX_MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @IWX_MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @IWX_MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @IWX_MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+#define IWX_MAC_PROT_FLG_TGG_PROTECT (1 << 3)
+#define IWX_MAC_PROT_FLG_HT_PROT (1 << 23)
+#define IWX_MAC_PROT_FLG_FAT_PROT (1 << 24)
+#define IWX_MAC_PROT_FLG_SELF_CTS_EN (1 << 30)
+
+#define IWX_MAC_FLG_SHORT_SLOT (1 << 4)
+#define IWX_MAC_FLG_SHORT_PREAMBLE (1 << 5)
+
+/**
+ * Supported MAC types
+ * @IWX_FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @IWX_FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @IWX_FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @IWX_FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @IWX_FW_MAC_TYPE_IBSS: IBSS
+ * @IWX_FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @IWX_FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @IWX_FW_MAC_TYPE_P2P_STA: P2P client
+ * @IWX_FW_MAC_TYPE_GO: P2P GO
+ * @IWX_FW_MAC_TYPE_TEST: ?
+ * @IWX_FW_MAC_TYPE_MAX: highest support MAC type
+ */
+#define IWX_FW_MAC_TYPE_FIRST 1
+#define IWX_FW_MAC_TYPE_AUX IWX_FW_MAC_TYPE_FIRST
+#define IWX_FW_MAC_TYPE_LISTENER 2
+#define IWX_FW_MAC_TYPE_PIBSS 3
+#define IWX_FW_MAC_TYPE_IBSS 4
+#define IWX_FW_MAC_TYPE_BSS_STA 5
+#define IWX_FW_MAC_TYPE_P2P_DEVICE 6
+#define IWX_FW_MAC_TYPE_P2P_STA 7
+#define IWX_FW_MAC_TYPE_GO 8
+#define IWX_FW_MAC_TYPE_TEST 9
+#define IWX_FW_MAC_TYPE_MAX IWX_FW_MAC_TYPE_TEST
+/* IWX_MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * TSF hw timer ID
+ * @IWX_TSF_ID_A: use TSF A
+ * @IWX_TSF_ID_B: use TSF B
+ * @IWX_TSF_ID_C: use TSF C
+ * @IWX_TSF_ID_D: use TSF D
+ * @IWX_NUM_TSF_IDS: number of TSF timers available
+ */
+#define IWX_TSF_ID_A 0
+#define IWX_TSF_ID_B 1
+#define IWX_TSF_ID_C 2
+#define IWX_TSF_ID_D 3
+#define IWX_NUM_TSF_IDS 4
+/* IWX_TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwx_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * NOTE: obsolete from VER2 and on
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ap {
+	uint32_t beacon_time;		/* beacon Tx time (system time) */
+	uint64_t beacon_tsf;		/* beacon Tx time (TSF) */
+	uint32_t bi;			/* beacon interval (TU) */
+	uint32_t bi_reciprocal;		/* 2^32 / bi */
+	uint32_t dtim_interval;		/* DTIM transmit time (TU) */
+	uint32_t dtim_reciprocal;	/* 2^32 / dtim_interval */
+	uint32_t mcast_qid;		/* mcast queue ID; obsolete from VER2 on */
+	uint32_t beacon_template;	/* beacon template ID */
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ibss {
+	uint32_t beacon_time;		/* beacon Tx time (system time) */
+	uint64_t beacon_tsf;		/* beacon Tx time (TSF) */
+	uint32_t bi;			/* beacon interval (TU) */
+	uint32_t bi_reciprocal;		/* 2^32 / bi */
+	uint32_t beacon_template;	/* beacon template ID */
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ * @assoc_beacon_arrive_time: arrival time (system time) of the beacon
+ *	received around association -- TODO confirm; not documented upstream
+ */
+struct iwx_mac_data_sta {
+	uint32_t is_assoc;		/* 1 if associated, 0 otherwise */
+	uint32_t dtim_time;		/* DTIM arrival time (system time) */
+	uint64_t dtim_tsf;		/* DTIM arrival time (TSF) */
+	uint32_t bi;			/* beacon interval (TU); assoc only */
+	uint32_t bi_reciprocal;		/* 2^32 / bi; assoc only */
+	uint32_t dtim_interval;		/* DTIM interval (TU); assoc only */
+	uint32_t dtim_reciprocal;	/* 2^32 / dtim_interval; assoc only */
+	uint32_t listen_interval;	/* in beacon intervals; assoc only */
+	uint32_t assoc_id;		/* AID assigned by the AP */
+	uint32_t assoc_beacon_arrive_time; /* not in doc comment above -- TODO confirm semantics */
+} __packed; /* IWX_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwx_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwx_mac_data_go {
+	struct iwx_mac_data_ap ap;	/* common AP config data */
+	uint32_t ctwin;			/* client traffic window (TU); 0 = no CT window */
+	uint32_t opp_ps_enabled;	/* opportunistic PS allowed */
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwx_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwx_mac_data_p2p_sta {
+	struct iwx_mac_data_sta sta;	/* common station config data */
+	uint32_t ctwin;			/* client traffic window (TU); 0 = no CT window */
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwx_mac_data_pibss {
+	uint32_t stats_interval;	/* statistics notification interval (TU) */
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwx_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ * other channels as well. This should be set to true only if the
+ * device is discoverable and there is an active GO. Note that setting this
+ * field when not needed, will increase the number of interrupts and have
+ * effect on the platform power, as this setting opens the Rx filters on
+ * all macs.
+ */
+struct iwx_mac_data_p2p_dev {
+	uint32_t is_disc_extended;	/* nonzero: discoverable on other channels too */
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * MAC context filter flags
+ * @IWX_MAC_FILTER_IN_PROMISC: accept all data frames
+ * @IWX_MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @IWX_MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @IWX_MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @IWX_MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @IWX_MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @IWX_MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @IWX_MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @IWX_MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+#define IWX_MAC_FILTER_IN_PROMISC (1 << 0)
+#define IWX_MAC_FILTER_IN_CONTROL_AND_MGMT (1 << 1)
+#define IWX_MAC_FILTER_ACCEPT_GRP (1 << 2)
+#define IWX_MAC_FILTER_DIS_DECRYPT (1 << 3)
+#define IWX_MAC_FILTER_DIS_GRP_DECRYPT (1 << 4)
+#define IWX_MAC_FILTER_IN_BEACON (1 << 6)
+#define IWX_MAC_FILTER_OUT_BCAST (1 << 8)
+#define IWX_MAC_FILTER_IN_CRC32 (1 << 11)
+#define IWX_MAC_FILTER_IN_PROBE_REQUEST (1 << 12)
+
+/**
+ * QoS flags
+ * @IWX_MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @IWX_MAC_QOS_FLG_TGN: HT is enabled
+ * @IWX_MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+#define IWX_MAC_QOS_FLG_UPDATE_EDCA (1 << 0)
+#define IWX_MAC_QOS_FLG_TGN (1 << 1)
+#define IWX_MAC_QOS_FLG_TXOP_TYPE (1 << 4)
+
+/**
+ * struct iwx_ac_qos - QOS timing params for IWX_MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwx_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwx_ac_qos {
+	uint16_t cw_min;	/* CW start, slots; power-of-2 minus 1 */
+	uint16_t cw_max;	/* CW max, slots; power-of-2 minus 1 */
+	uint8_t aifsn;		/* AIFS in slots */
+	uint8_t fifos_mask;	/* FIFOs used by this MAC for this AC */
+	uint16_t edca_txop;	/* TXOP length in usec */
+} __packed; /* IWX_AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( IWX_MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @mac_type: one of IWX_FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of IWX_TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of IWX_MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of IWX_MAC_FILTER_*
+ * @qos_flags: from IWX_MAC_QOS_FLG_*
+ * @ac: one iwx_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwx_mac_data_*, according to mac_type
+ */
+struct iwx_mac_ctx_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	uint32_t id_and_color;		/* ID and color of the MAC */
+	uint32_t action;		/* IWX_FW_CTXT_ACTION_* */
+	/* IWX_MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+	uint32_t mac_type;		/* IWX_FW_MAC_TYPE_* */
+	uint32_t tsf_id;		/* IWX_TSF_ID_* */
+	uint8_t node_addr[6];		/* our MAC address */
+	uint16_t reserved_for_node_addr;
+	uint8_t bssid_addr[6];		/* BSSID */
+	uint16_t reserved_for_bssid_addr;
+	uint32_t cck_rates;		/* basic CCK rates */
+	uint32_t ofdm_rates;		/* basic OFDM rates */
+	uint32_t protection_flags;	/* IWX_MAC_PROT_FLG_* */
+	uint32_t cck_short_preamble;	/* 0x20 to enable, 0 otherwise */
+	uint32_t short_slot;		/* 0x10 to enable, 0 otherwise */
+	uint32_t filter_flags;		/* IWX_MAC_FILTER_* */
+	/* IWX_MAC_QOS_PARAM_API_S_VER_1 */
+	uint32_t qos_flags;		/* IWX_MAC_QOS_FLG_* */
+	struct iwx_ac_qos ac[IWX_AC_NUM+1];	/* per-AC EDCA parameters */
+	/* IWX_MAC_CONTEXT_COMMON_DATA_API_S */
+	union {
+		/* member selected by mac_type */
+		struct iwx_mac_data_ap ap;
+		struct iwx_mac_data_go go;
+		struct iwx_mac_data_sta sta;
+		struct iwx_mac_data_p2p_sta p2p_sta;
+		struct iwx_mac_data_p2p_dev p2p_dev;
+		struct iwx_mac_data_pibss pibss;
+		struct iwx_mac_data_ibss ibss;
+	};
+} __packed; /* IWX_MAC_CONTEXT_CMD_API_S_VER_1 */
+
+/*
+ * Compute the "reciprocal" value used by the *_reciprocal fields above;
+ * returns 0 when v == 0 (avoiding division by zero). Note the divisor
+ * is 0xFFFFFFFF (2^32 - 1), not exactly 2^32 as the struct doc comments
+ * state.
+ */
+static inline uint32_t iwx_reciprocal(uint32_t v)
+{
+	if (!v)
+		return 0;
+	return 0xFFFFFFFF / v;
+}
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * masks for LTR config command flags
+ * @IWX_LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ * memory access
+ * @IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ * reg change
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ * D0 to D3
+ * @IWX_LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @IWX_LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
+ * @IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+#define IWX_LTR_CFG_FLAG_FEATURE_ENABLE 0x00000001
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS 0x00000002
+#define IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH 0x00000004
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 0x00000008
+#define IWX_LTR_CFG_FLAG_SW_SET_SHORT 0x00000010
+#define IWX_LTR_CFG_FLAG_SW_SET_LONG 0x00000020
+#define IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD 0x00000040
+
+#define IWX_LTR_VALID_STATES_NUM 4
+
+/**
+ * struct iwx_ltr_config_cmd - configures the LTR
+ * @flags: combination of IWX_LTR_CFG_FLAG_* values
+ * @static_long:
+ * @static_short:
+ * @ltr_cfg_values:
+ * @ltr_short_idle_timeout:
+ */
+struct iwx_ltr_config_cmd {
+	uint32_t flags;			/* IWX_LTR_CFG_FLAG_* */
+	uint32_t static_long;		/* fixed static long LTR value -- TODO confirm units */
+	uint32_t static_short;		/* fixed static short LTR value -- TODO confirm units */
+	uint32_t ltr_cfg_values[IWX_LTR_VALID_STATES_NUM]; /* per-state values -- TODO confirm */
+	uint32_t ltr_short_idle_timeout;	/* undocumented upstream */
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define IWX_POWER_LPRX_RSSI_THRESHOLD 75
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+/**
+ * Masks for iwx_mac_power_cmd command flags
+ * @IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM could sleep over DTIM till listen Interval.
+ * @IWX_POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @IWX_POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @IWX_POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @IWX_POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
+*/
+#define IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK (1 << 1)
+#define IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK (1 << 2)
+#define IWX_POWER_FLAGS_SNOOZE_ENA_MSK (1 << 5)
+#define IWX_POWER_FLAGS_BT_SCO_ENA (1 << 8)
+#define IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK (1 << 9)
+#define IWX_POWER_FLAGS_LPRX_ENA_MSK (1 << 11)
+#define IWX_POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK (1 << 12)
+
+#define IWX_POWER_VEC_SIZE 5
+
+/**
+ * Masks for device power command flags
+ * @IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ * '1' Allow to save power by turning off receiver and transmitter.
+ * '0' Do not allow. This flag should be always set to '1' unless
+ * one needs to disable actual power down for debug purposes.
+ * @IWX_DEVICE_POWER_FLAGS_CAM_MSK:
+ * '1' CAM (Continuous Active Mode) is set, power management is disabled.
+ * '0' Power management is enabled, one of the power schemes is applied.
+ */
+#define IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_DEVICE_POWER_FLAGS_CAM_MSK (1 << 13)
+
+/**
+ * struct iwx_device_power_cmd - device wide power command.
+ * IWX_POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from IWX_DEVICE_POWER_FLAGS_*
+ */
+struct iwx_device_power_cmd {
+	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
+	uint16_t flags;		/* IWX_DEVICE_POWER_FLAGS_* */
+	uint16_t reserved;
+} __packed;
+
+/**
+ * struct iwx_mac_power_cmd - New power command containing uAPSD support
+ * IWX_MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed:- 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW use this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value need to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80dbm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from last
+ * received unicast RX packet, before client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold:
+*/
+struct iwx_mac_power_cmd {
+	/* CONTEXT_DESC_API_T_VER_1 */
+	uint32_t id_and_color;		/* MAC context identifier */
+
+	/* CLIENT_PM_POWER_TABLE_S_VER_1 */
+	uint16_t flags;			/* IWX_POWER_FLAGS_* */
+	uint16_t keep_alive_seconds;	/* default 25s; minimum 3 * DTIM */
+	uint32_t rx_data_timeout;	/* usec; legacy PM */
+	uint32_t tx_data_timeout;	/* usec; legacy PM */
+	uint32_t rx_data_timeout_uapsd;	/* usec; uAPSD */
+	uint32_t tx_data_timeout_uapsd;	/* usec; uAPSD */
+	uint8_t lprx_rssi_threshold;	/* default 80dBm */
+	uint8_t skip_dtim_periods;	/* e.g. 2 to skip over one DTIM */
+	uint16_t snooze_interval;	/* msec */
+	uint16_t snooze_window;		/* msec */
+	uint8_t snooze_step;
+	uint8_t qndp_tid;		/* TID for uAPSD QNDP triggers */
+	uint8_t uapsd_ac_flags;		/* IEEE80211_WMM_IE_STA_QOSINFO_AC* */
+	uint8_t uapsd_max_sp;		/* IEEE80211_WMM_IE_STA_QOSINFO_SP_* */
+	uint8_t heavy_tx_thld_packets;	/* in packets */
+	uint8_t heavy_rx_thld_packets;	/* in packets */
+	uint8_t heavy_tx_thld_percentage;	/* load percentage */
+	uint8_t heavy_rx_thld_percentage;	/* load percentage */
+	uint8_t limited_ps_threshold;
+	uint8_t reserved;
+} __packed;
+
+#define IWX_DEFAULT_PS_TX_DATA_TIMEOUT (100 * 1000)
+#define IWX_DEFAULT_PS_RX_DATA_TIMEOUT (100 * 1000)
+
+/*
+ * struct iwx_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * associated AP is identified as improperly implementing uAPSD protocol.
+ * IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwx_uapsd_misbehaving_ap_notif {
+	uint32_t sta_id;	/* station table index of the associated AP */
+	uint8_t mac_id;		/* not in doc comment above; MAC context id? -- TODO confirm */
+	uint8_t reserved[3];
+} __packed;
+
+/**
+ * struct iwx_beacon_filter_cmd
+ * IWX_REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
+ * If the current temperature is above this threshold - Fast filter
+ * will be used, If the current temperature is below this threshold -
+ * Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ * for a longer period of time then this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ */
+struct iwx_beacon_filter_cmd {
+	uint32_t bf_energy_delta;	/* RSSI delta threshold, normal state */
+	uint32_t bf_roaming_energy_delta; /* RSSI delta threshold, roaming state */
+	uint32_t bf_roaming_state;	/* energy threshold selecting roaming delta */
+	uint32_t bf_temp_threshold;	/* selects fast vs. slow temperature filter */
+	uint32_t bf_temp_fast_filter;	/* temperature delta threshold (fast) */
+	uint32_t bf_temp_slow_filter;	/* temperature delta threshold (slow) */
+	uint32_t bf_enable_beacon_filter; /* 1 enabled, 0 disabled */
+	uint32_t bf_debug_flag;		/* not in doc comment above; default IWX_BF_DEBUG_FLAG_DEFAULT */
+	uint32_t bf_escape_timer;	/* units: beacons */
+	uint32_t ba_escape_timer;	/* units: beacons */
+	uint32_t ba_enable_beacon_abort; /* 1 enabled, 0 disabled */
+} __packed;
+
+/* Beacon filtering and beacon abort */
+#define IWX_BF_ENERGY_DELTA_DEFAULT 5
+#define IWX_BF_ENERGY_DELTA_MAX 255
+#define IWX_BF_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWX_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWX_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_STATE_DEFAULT 72
+#define IWX_BF_ROAMING_STATE_MAX 255
+#define IWX_BF_ROAMING_STATE_MIN 0
+
+#define IWX_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWX_BF_TEMP_THRESHOLD_MAX 255
+#define IWX_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWX_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWX_BF_TEMP_FAST_FILTER_MAX 255
+#define IWX_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWX_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWX_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWX_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWX_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWX_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWX_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWX_BF_ESCAPE_TIMER_MAX 1024
+#define IWX_BF_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWX_BA_ESCAPE_TIMER_D3 9
+#define IWX_BA_ESCAPE_TIMER_MAX 1024
+#define IWX_BA_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWX_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = htole32(IWX_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ htole32(IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = htole32(IWX_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = htole32(IWX_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = htole32(IWX_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = htole32(IWX_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = htole32(IWX_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = htole32(IWX_BF_ESCAPE_TIMER_DEFAULT), \
+ .ba_escape_timer = htole32(IWX_BA_ESCAPE_TIMER_DEFAULT)
+
+/* uCode API values for HT/VHT bit rates */
+#define IWX_RATE_HT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_HT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_HT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_HT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_HT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_HT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_HT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_HT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_HT_MIMO2_MCS_8_PLCP 0x8
+#define IWX_RATE_HT_MIMO2_MCS_9_PLCP 0x9
+#define IWX_RATE_HT_MIMO2_MCS_10_PLCP 0xA
+#define IWX_RATE_HT_MIMO2_MCS_11_PLCP 0xB
+#define IWX_RATE_HT_MIMO2_MCS_12_PLCP 0xC
+#define IWX_RATE_HT_MIMO2_MCS_13_PLCP 0xD
+#define IWX_RATE_HT_MIMO2_MCS_14_PLCP 0xE
+#define IWX_RATE_HT_MIMO2_MCS_15_PLCP 0xF
+#define IWX_RATE_VHT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_VHT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_VHT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_VHT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_VHT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_VHT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_VHT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_VHT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_VHT_SISO_MCS_8_PLCP 8
+#define IWX_RATE_VHT_SISO_MCS_9_PLCP 9
+#define IWX_RATE_VHT_MIMO2_MCS_0_PLCP 0x10
+#define IWX_RATE_VHT_MIMO2_MCS_1_PLCP 0x11
+#define IWX_RATE_VHT_MIMO2_MCS_2_PLCP 0x12
+#define IWX_RATE_VHT_MIMO2_MCS_3_PLCP 0x13
+#define IWX_RATE_VHT_MIMO2_MCS_4_PLCP 0x14
+#define IWX_RATE_VHT_MIMO2_MCS_5_PLCP 0x15
+#define IWX_RATE_VHT_MIMO2_MCS_6_PLCP 0x16
+#define IWX_RATE_VHT_MIMO2_MCS_7_PLCP 0x17
+#define IWX_RATE_VHT_MIMO2_MCS_8_PLCP 0x18
+#define IWX_RATE_VHT_MIMO2_MCS_9_PLCP 0x19
+#define IWX_RATE_HT_SISO_MCS_INV_PLCP 0x20
+#define IWX_RATE_HT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_SISO_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_8_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_9_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+
+/*
+ * These serve as indexes into struct iwx_rate iwx_rates[IWX_RIDX_MAX].
+ */
+/*
+ * Note: CCK, OFDM, HT and VHT rates share a single index space; the
+ * MCS index aliases overlap the legacy OFDM indices (e.g. MCS 0 shares
+ * an index with 6 Mbit/s OFDM), with MIMO2 MCS indices interleaved.
+ */
+enum {
+	IWX_RATE_1M_INDEX = 0,
+	IWX_FIRST_CCK_RATE = IWX_RATE_1M_INDEX,
+	IWX_RATE_2M_INDEX,
+	IWX_RATE_5M_INDEX,
+	IWX_RATE_11M_INDEX,
+	IWX_LAST_CCK_RATE = IWX_RATE_11M_INDEX,
+	IWX_RATE_6M_INDEX,
+	IWX_FIRST_OFDM_RATE = IWX_RATE_6M_INDEX,
+	IWX_RATE_MCS_0_INDEX = IWX_RATE_6M_INDEX,
+	IWX_FIRST_HT_RATE = IWX_RATE_MCS_0_INDEX,
+	IWX_FIRST_VHT_RATE = IWX_RATE_MCS_0_INDEX,
+	IWX_RATE_9M_INDEX,
+	IWX_RATE_12M_INDEX,
+	IWX_RATE_MCS_1_INDEX = IWX_RATE_12M_INDEX,
+	IWX_RATE_MCS_8_INDEX,
+	IWX_FIRST_HT_MIMO2_RATE = IWX_RATE_MCS_8_INDEX,
+	IWX_RATE_18M_INDEX,
+	IWX_RATE_MCS_2_INDEX = IWX_RATE_18M_INDEX,
+	IWX_RATE_24M_INDEX,
+	IWX_RATE_MCS_3_INDEX = IWX_RATE_24M_INDEX,
+	IWX_RATE_MCS_9_INDEX,
+	IWX_RATE_36M_INDEX,
+	IWX_RATE_MCS_4_INDEX = IWX_RATE_36M_INDEX,
+	IWX_RATE_MCS_10_INDEX,
+	IWX_RATE_48M_INDEX,
+	IWX_RATE_MCS_5_INDEX = IWX_RATE_48M_INDEX,
+	IWX_RATE_MCS_11_INDEX,
+	IWX_RATE_54M_INDEX,
+	IWX_RATE_MCS_6_INDEX = IWX_RATE_54M_INDEX,
+	IWX_LAST_NON_HT_RATE = IWX_RATE_54M_INDEX,
+	IWX_RATE_MCS_7_INDEX,
+	IWX_LAST_HT_SISO_RATE = IWX_RATE_MCS_7_INDEX,
+	IWX_RATE_MCS_12_INDEX,
+	IWX_RATE_MCS_13_INDEX,
+	IWX_RATE_MCS_14_INDEX,
+	IWX_RATE_MCS_15_INDEX,
+	IWX_LAST_HT_RATE = IWX_RATE_MCS_15_INDEX,
+	IWX_LAST_VHT_RATE = IWX_RATE_MCS_9_INDEX,
+	IWX_RATE_COUNT_LEGACY = IWX_LAST_NON_HT_RATE + 1,
+	IWX_RATE_COUNT = IWX_LAST_HT_RATE + 1,
+};
+
+#define IWX_RATE_BIT_MSK(r) (1 << (IWX_RATE_##r##M_INDEX))
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+#define IWX_RATE_6M_PLCP 13
+#define IWX_RATE_9M_PLCP 15
+#define IWX_RATE_12M_PLCP 5
+#define IWX_RATE_18M_PLCP 7
+#define IWX_RATE_24M_PLCP 9
+#define IWX_RATE_36M_PLCP 11
+#define IWX_RATE_48M_PLCP 1
+#define IWX_RATE_54M_PLCP 3
+#define IWX_RATE_1M_PLCP 10
+#define IWX_RATE_2M_PLCP 20
+#define IWX_RATE_5M_PLCP 55
+#define IWX_RATE_11M_PLCP 110
+#define IWX_RATE_INVM_PLCP 0xff
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define IWX_RATE_MCS_HT_POS 8
+#define IWX_RATE_MCS_HT_MSK (1 << IWX_RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define IWX_RATE_MCS_CCK_POS 9
+#define IWX_RATE_MCS_CCK_MSK (1 << IWX_RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define IWX_RATE_MCS_VHT_POS 26
+#define IWX_RATE_MCS_VHT_MSK (1 << IWX_RATE_MCS_VHT_POS)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define IWX_RATE_HT_MCS_RATE_CODE_MSK 0x7
+#define IWX_RATE_HT_MCS_NSS_POS 3
+#define IWX_RATE_HT_MCS_NSS_MSK (3 << IWX_RATE_HT_MCS_NSS_POS)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define IWX_RATE_HT_MCS_GF_POS 10
+#define IWX_RATE_HT_MCS_GF_MSK (1 << IWX_RATE_HT_MCS_GF_POS)
+
+#define IWX_RATE_HT_MCS_INDEX_MSK 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define IWX_RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define IWX_RATE_VHT_MCS_NSS_POS 4
+#define IWX_RATE_VHT_MCS_NSS_MSK (3 << IWX_RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define IWX_RATE_LEGACY_RATE_MSK 0xff
+
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define IWX_RATE_MCS_CHAN_WIDTH_POS 11
+#define IWX_RATE_MCS_CHAN_WIDTH_MSK (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_20 (0 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_40 (1 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_80 (2 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_160 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define IWX_RATE_MCS_SGI_POS 13
+#define IWX_RATE_MCS_SGI_MSK (1 << IWX_RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define IWX_RATE_MCS_ANT_POS 14
+#define IWX_RATE_MCS_ANT_A_MSK (1 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_B_MSK (2 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_C_MSK (4 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_AB_MSK (IWX_RATE_MCS_ANT_A_MSK | \
+ IWX_RATE_MCS_ANT_B_MSK)
+#define IWX_RATE_MCS_ANT_ABC_MSK (IWX_RATE_MCS_ANT_AB_MSK | \
+ IWX_RATE_MCS_ANT_C_MSK)
+#define IWX_RATE_MCS_ANT_MSK IWX_RATE_MCS_ANT_ABC_MSK
+#define IWX_RATE_MCS_ANT_NUM 3
+
+/* Bit 17-18: (0) SS, (1) SS*2 */
+#define IWX_RATE_MCS_STBC_POS 17
+#define IWX_RATE_MCS_STBC_MSK (1 << IWX_RATE_MCS_STBC_POS)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define IWX_RATE_MCS_BF_POS 19
+#define IWX_RATE_MCS_BF_MSK (1 << IWX_RATE_MCS_BF_POS)
+
+/* Bit 20: (0) ZLF is off, (1) ZLF is on */
+#define IWX_RATE_MCS_ZLF_POS 20
+#define IWX_RATE_MCS_ZLF_MSK (1 << IWX_RATE_MCS_ZLF_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
+#define IWX_RATE_MCS_DUP_POS 24
+#define IWX_RATE_MCS_DUP_MSK (3 << IWX_RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define IWX_RATE_MCS_LDPC_POS 27
+#define IWX_RATE_MCS_LDPC_MSK (1 << IWX_RATE_MCS_LDPC_POS)
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define IWX_LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define IWX_LQ_FLAG_USE_RTS_POS 0
+#define IWX_LQ_FLAG_USE_RTS_MSK (1 << IWX_LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define IWX_LQ_FLAG_COLOR_POS 1
+#define IWX_LQ_FLAG_COLOR_MSK (7 << IWX_LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define IWX_LQ_FLAG_RTS_BW_SIG_POS 4
+#define IWX_LQ_FLAG_RTS_BW_SIG_NONE (0 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_STATIC (1 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with a narrower BW than requested in rates
+ */
+#define IWX_LQ_FLAG_DYNAMIC_BW_POS 6
+#define IWX_LQ_FLAG_DYNAMIC_BW_MSK (1 << IWX_LQ_FLAG_DYNAMIC_BW_POS)
+
+/* Antenna flags. */
+#define IWX_ANT_A (1 << 0)
+#define IWX_ANT_B (1 << 1)
+#define IWX_ANT_C (1 << 2)
+/* Shortcuts. */
+#define IWX_ANT_AB (IWX_ANT_A | IWX_ANT_B)
+#define IWX_ANT_BC (IWX_ANT_B | IWX_ANT_C)
+#define IWX_ANT_ABC (IWX_ANT_A | IWX_ANT_B | IWX_ANT_C)
+
+/**
+ * bitmasks for tx_flags in TX command
+ * @IWX_TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @IWX_TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @IWX_TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ * Otherwise, use rate_n_flags from the TX command
+ * @IWX_TX_CMD_FLG_BA: this frame is a block ack
+ * @IWX_TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ * Must set IWX_TX_CMD_FLG_ACK with this flag.
+ * @IWX_TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
+ * @IWX_TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @IWX_TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @IWX_TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @IWX_TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @IWX_TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @IWX_TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @IWX_TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
+ * @IWX_TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ * Should be set for beacons and probe responses
+ * @IWX_TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @IWX_TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @IWX_TX_CMD_FLG_AGG_START: allow this frame to start aggregation
+ * @IWX_TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ * Should be set for 26/30 length MAC headers
+ * @IWX_TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @IWX_TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
+ * @IWX_TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @IWX_TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @IWX_TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @IWX_TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @IWX_TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @IWX_TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+#define IWX_TX_CMD_FLG_PROT_REQUIRE (1 << 0)
+#define IWX_TX_CMD_FLG_ACK (1 << 3)
+#define IWX_TX_CMD_FLG_STA_RATE (1 << 4)
+#define IWX_TX_CMD_FLG_BA (1 << 5)
+#define IWX_TX_CMD_FLG_BAR (1 << 6)
+#define IWX_TX_CMD_FLG_TXOP_PROT (1 << 7)
+#define IWX_TX_CMD_FLG_VHT_NDPA (1 << 8)
+#define IWX_TX_CMD_FLG_HT_NDPA (1 << 9)
+#define IWX_TX_CMD_FLG_CSI_FDBK2HOST (1 << 10)
+#define IWX_TX_CMD_FLG_BT_DIS (1 << 12)
+#define IWX_TX_CMD_FLG_SEQ_CTL (1 << 13)
+#define IWX_TX_CMD_FLG_MORE_FRAG (1 << 14)
+#define IWX_TX_CMD_FLG_NEXT_FRAME (1 << 15)
+#define IWX_TX_CMD_FLG_TSF (1 << 16)
+#define IWX_TX_CMD_FLG_CALIB (1 << 17)
+#define IWX_TX_CMD_FLG_KEEP_SEQ_CTL (1 << 18)
+#define IWX_TX_CMD_FLG_AGG_START (1 << 19)
+#define IWX_TX_CMD_FLG_MH_PAD (1 << 20)
+#define IWX_TX_CMD_FLG_RESP_TO_DRV (1 << 21)
+#define IWX_TX_CMD_FLG_CCMP_AGG (1 << 22)
+#define IWX_TX_CMD_FLG_TKIP_MIC_DONE (1 << 23)
+#define IWX_TX_CMD_FLG_DUR (1 << 25)
+#define IWX_TX_CMD_FLG_FW_DROP (1 << 26)
+#define IWX_TX_CMD_FLG_EXEC_PAPD (1 << 27)
+#define IWX_TX_CMD_FLG_PAPD_TYPE (1 << 28)
+#define IWX_TX_CMD_FLG_HCCA_CHUNK (1U << 31)
+/* IWX_TX_FLAGS_BITS_API_S_VER_1 */
+
+/*
+ * TX command security control
+ */
+#define IWX_TX_CMD_SEC_WEP 0x01
+#define IWX_TX_CMD_SEC_CCM 0x02
+#define IWX_TX_CMD_SEC_TKIP 0x03
+#define IWX_TX_CMD_SEC_EXT 0x04
+#define IWX_TX_CMD_SEC_MSK 0x07
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define IWX_TX_CMD_SEC_KEY128 0x08
+
+/* TODO: check how these values can be OK with only a 16-bit variable. */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (IWX_TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define IWX_TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define IWX_TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define IWX_TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define IWX_TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_POS (16)
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define IWX_TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define IWX_TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms */
+#define IWX_TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define IWX_TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWX_TID_NON_QOS 0
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWX_DEFAULT_TX_RETRY 15
+#define IWX_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWX_RTS_DFAULT_RETRY_LIMIT 3
+#define IWX_BAR_DFAULT_RETRY_LIMIT 60
+#define IWX_LOW_RETRY_LIMIT 7
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since PN location is 8 bytes at offset 12, it's 20 now.
+ * If we make it bigger then allocations will be bigger and copy slower, so
+ * that's probably not useful.
+ */
+#define IWX_FIRST_TB_SIZE 20
+#define IWX_FIRST_TB_SIZE_ALIGN ((IWX_FIRST_TB_SIZE + (64 - 1)) & ~(64 - 1))
+
+/**
+ * %iwx_tx_cmd_gen2 offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ * from mac header end. For normal case it is 4 words for SNAP.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload in case there is tunneling such as
+ * IPv6 in IPv4, in such case the ip header offset should point to the
+ * inner ip header and IPv4 checksum of the external header should be
+ * calculated by driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ * alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+#define IWX_TX_CMD_OFFLD_IP_HDR (1 << 0)
+#define IWX_TX_CMD_OFFLD_L4_EN (1 << 6)
+#define IWX_TX_CMD_OFFLD_L3_EN (1 << 7)
+#define IWX_TX_CMD_OFFLD_MH_SIZE (1 << 8)
+#define IWX_TX_CMD_OFFLD_PAD (1 << 13)
+#define IWX_TX_CMD_OFFLD_AMSDU (1 << 14)
+#define IWX_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWX_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
+
+/*
+ * Security info (packet number) placed in the first TB; see the
+ * IWX_FIRST_TB_SIZE comment above for why the PN must be copied there.
+ */
+struct iwx_dram_sec_info {
+ uint32_t pn_low; /* lower 32 bits of the packet number */
+ uint16_t pn_high; /* upper 16 bits of the packet number */
+ uint16_t aux_info; /* NOTE(review): FW-internal; semantics not visible here */
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * bitmasks for tx_flags in TX command for 22000
+ * @IWX_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWX_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWX_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+#define IWX_TX_FLAGS_CMD_RATE (1 << 0)
+#define IWX_TX_FLAGS_ENCRYPT_DIS (1 << 1)
+#define IWX_TX_FLAGS_HIGH_PRI (1 << 2)
+/* TX_FLAGS_BITS_API_S_VER_3 */
+
+/**
+ * struct iwx_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of TX_CMD_FLG_*
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwx_tx_cmd_gen2 {
+ uint16_t len; /* payload length in bytes */
+ uint16_t offload_assist; /* IWX_TX_CMD_OFFLD_* checksum offload config */
+ uint32_t flags; /* IWX_TX_FLAGS_* (TX_FLAGS_BITS_API_S_VER_3) */
+ struct iwx_dram_sec_info dram_info; /* FW internal DRAM storage */
+ uint32_t rate_n_flags; /* rate for all Tx attempts if IWX_TX_FLAGS_CMD_RATE */
+ struct ieee80211_frame hdr[0]; /* variable-length 802.11 header follows */
+} __packed; /* TX_CMD_API_S_VER_7 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * status that is returned by the fw after attempts to Tx
+ * @IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * Valid only if frame_count == 1
+ */
+#define IWX_TX_STATUS_MSK 0x000000ff
+#define IWX_TX_STATUS_SUCCESS 0x01
+#define IWX_TX_STATUS_DIRECT_DONE 0x02
+/* postpone TX */
+#define IWX_TX_STATUS_POSTPONE_DELAY 0x40
+#define IWX_TX_STATUS_POSTPONE_FEW_BYTES 0x41
+#define IWX_TX_STATUS_POSTPONE_BT_PRIO 0x42
+#define IWX_TX_STATUS_POSTPONE_QUIET_PERIOD 0x43
+#define IWX_TX_STATUS_POSTPONE_CALC_TTAK 0x44
+/* abort TX */
+#define IWX_TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY 0x81
+#define IWX_TX_STATUS_FAIL_SHORT_LIMIT 0x82
+#define IWX_TX_STATUS_FAIL_LONG_LIMIT 0x83
+#define IWX_TX_STATUS_FAIL_UNDERRUN 0x84
+#define IWX_TX_STATUS_FAIL_DRAIN_FLOW 0x85
+#define IWX_TX_STATUS_FAIL_RFKILL_FLUSH 0x86
+#define IWX_TX_STATUS_FAIL_LIFE_EXPIRE 0x87
+#define IWX_TX_STATUS_FAIL_DEST_PS 0x88
+#define IWX_TX_STATUS_FAIL_HOST_ABORTED 0x89
+#define IWX_TX_STATUS_FAIL_BT_RETRY 0x8a
+#define IWX_TX_STATUS_FAIL_STA_INVALID 0x8b
+#define IWX_TX_STATUS_FAIL_FRAG_DROPPED 0x8c
+#define IWX_TX_STATUS_FAIL_TID_DISABLE 0x8d
+#define IWX_TX_STATUS_FAIL_FIFO_FLUSHED 0x8e
+#define IWX_TX_STATUS_FAIL_SMALL_CF_POLL 0x8f
+#define IWX_TX_STATUS_FAIL_FW_DROP 0x90
+#define IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH 0x91
+#define IWX_TX_STATUS_INTERNAL_ABORT 0x92
+#define IWX_TX_MODE_MSK 0x00000f00
+#define IWX_TX_MODE_NO_BURST 0x00000000
+#define IWX_TX_MODE_IN_BURST_SEQ 0x00000100
+#define IWX_TX_MODE_FIRST_IN_BURST 0x00000200
+#define IWX_TX_QUEUE_NUM_MSK 0x0001f000
+#define IWX_TX_NARROW_BW_MSK 0x00060000
+#define IWX_TX_NARROW_BW_1DIV2 0x00020000
+#define IWX_TX_NARROW_BW_1DIV4 0x00040000
+#define IWX_TX_NARROW_BW_1DIV8 0x00060000
+
+/*
+ * TX aggregation status
+ * @IWX_AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @IWX_AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ */
+#define IWX_AGG_TX_STATE_STATUS_MSK 0x0fff
+#define IWX_AGG_TX_STATE_TRANSMITTED 0x0000
+#define IWX_AGG_TX_STATE_UNDERRUN 0x0001
+#define IWX_AGG_TX_STATE_BT_PRIO 0x0002
+#define IWX_AGG_TX_STATE_FEW_BYTES 0x0004
+#define IWX_AGG_TX_STATE_ABORT 0x0008
+#define IWX_AGG_TX_STATE_LAST_SENT_TTL 0x0010
+#define IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT 0x0020
+#define IWX_AGG_TX_STATE_LAST_SENT_BT_KILL 0x0040
+#define IWX_AGG_TX_STATE_SCD_QUERY 0x0080
+#define IWX_AGG_TX_STATE_TEST_BAD_CRC32 0x0100
+#define IWX_AGG_TX_STATE_RESPONSE 0x01ff
+#define IWX_AGG_TX_STATE_DUMP_TX 0x0200
+#define IWX_AGG_TX_STATE_DELAY_TX 0x0400
+#define IWX_AGG_TX_STATE_TRY_CNT_POS 12
+#define IWX_AGG_TX_STATE_TRY_CNT_MSK (0xf << IWX_AGG_TX_STATE_TRY_CNT_POS)
+
+#define IWX_AGG_TX_STATE_LAST_SENT_MSK (IWX_AGG_TX_STATE_LAST_SENT_TTL| \
+ IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ IWX_AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've
+ * written the bytes to the TXE, but we know nothing about what the DSP did.
+ */
+#define IWX_AGG_TX_STAT_FRAME_NOT_SENT (IWX_AGG_TX_STATE_FEW_BYTES | \
+ IWX_AGG_TX_STATE_ABORT | \
+ IWX_AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * IWX_REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at
+ * same rate. Rate scaling may have been used if first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct iwx_agg_tx_status - per packet TX aggregation status
+ * @status: enum iwx_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct iwx_agg_tx_status {
+ uint16_t status; /* IWX_AGG_TX_STATE_* */
+ uint16_t sequence; /* sequence # of this frame's Tx cmd (not the SSN) */
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define IWX_TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define IWX_TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define IWX_TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWX_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWX_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+
+/**
+ * struct iwx_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( IWX_REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. IWX_RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status IWX_TX_STATUS_*
+ * for agg: status of 1st frame, IWX_AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwx_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp_v3 {
+ uint8_t frame_count; /* 1: no aggregation, >1: aggregation */
+ uint8_t bt_kill_count; /* times blocked by bluetooth (unused for agg) */
+ uint8_t failure_rts; /* failures due to unsuccessful RTS */
+ uint8_t failure_frame; /* failures due to no ACK (unused for agg) */
+ uint32_t initial_rate; /* IWX_RATE_MCS_* of the successful Tx */
+ uint16_t wireless_media_time; /* air time used, in usec */
+
+ /* tx power info */
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint16_t reserved;
+
+ uint32_t tfd_info; /* TFD information set by the FH */
+ uint16_t seq_ctl; /* sequence control from the Tx cmd */
+ uint16_t byte_cnt; /* byte count from the Tx cmd */
+ uint8_t tlc_info; /* TLC rate info */
+ uint8_t ra_tid; /* bits [3:0] = ra, bits [7:4] = tid */
+ uint16_t frame_ctrl; /* frame control */
+
+ /* for agg: frame_count status entries follow, then the SCD SSN */
+ struct iwx_agg_tx_status status;
+} __packed; /* IWX_TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwx_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @reserved2: reserved for padding/alignment
+ * @status: for non-agg: frame status TX_STATUS_*
+ * For version 6 TX response isn't received for aggregation at all.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp {
+ uint8_t frame_count; /* 1: no aggregation, >1: aggregation */
+ uint8_t bt_kill_count; /* times blocked by bluetooth (unused for agg) */
+ uint8_t failure_rts; /* failures due to unsuccessful RTS */
+ uint8_t failure_frame; /* failures due to no ACK (unused for agg) */
+ uint32_t initial_rate; /* RATE_MCS_* of the successful Tx */
+ uint16_t wireless_media_time; /* air time used, in usec */
+
+ /* tx power info */
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint8_t reduced_tpc; /* transmit power reduction used */
+ uint8_t reserved;
+
+ uint32_t tfd_info; /* TFD information set by the FH */
+ uint16_t seq_ctl; /* sequence control from the Tx cmd */
+ uint16_t byte_cnt; /* byte count from the Tx cmd */
+ uint8_t tlc_info; /* TLC rate info */
+ uint8_t ra_tid; /* bits [3:0] = ra, bits [7:4] = tid */
+ uint16_t frame_ctrl; /* frame control */
+ uint16_t tx_queue; /* TX queue for this response */
+ uint16_t reserved2; /* padding/alignment */
+ struct iwx_agg_tx_status status; /* frame status; SCD SSN follows */
+} __packed; /* TX_RSP_API_S_VER_6 */
+
+/**
+ * struct iwx_ba_notif - notifies about reception of BA
+ * ( IWX_BA_NOTIF = 0xc5 )
+ * @sta_addr_lo32: lower 32 bits of the MAC address
+ * @sta_addr_hi16: upper 16 bits of the MAC address
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl: sequence control field
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ */
+struct iwx_ba_notif {
+ uint32_t sta_addr_lo32; /* lower 32 bits of the station MAC address */
+ uint16_t sta_addr_hi16; /* upper 16 bits of the station MAC address */
+ uint16_t reserved;
+
+ uint8_t sta_id; /* index of BA-sending station in fw's station table */
+ uint8_t tid; /* tid of the BA session */
+ uint16_t seq_ctl;
+ uint64_t bitmap; /* BA bitmap as seen in the air */
+ uint16_t scd_flow; /* tx queue this BA relates to */
+ uint16_t scd_ssn; /* index of the last contiguously sent packet */
+ uint8_t txed; /* number of Txed frames in this batch */
+ uint8_t txed_2_done; /* number of Acked frames in this batch */
+ uint16_t reserved1;
+} __packed;
+
+/* Beacon Tx notification; header reuses the v3 Tx response layout. */
+struct iwx_beacon_notif {
+ struct iwx_tx_resp_v3 beacon_notify_hdr;
+ uint64_t tsf; /* presumably TSF at beacon Tx time -- confirm vs. firmware docs */
+ uint32_t ibss_mgr_status;
+} __packed;
+
+/**
+ * dump (flush) control flags
+ * @IWX_DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+#define IWX_DUMP_TX_FIFO_FLUSH (1 << 1)
+
+/**
+ * struct iwx_tx_path_flush_cmd -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd_v1 {
+ uint32_t queues_ctl; /* bitmap of queues to flush */
+ uint16_t flush_ctl; /* control flags (IWX_DUMP_TX_FIFO_FLUSH) */
+ uint16_t reserved;
+} __packed; /* IWX_TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd {
+ uint32_t sta_id; /* station ID to flush */
+ uint16_t tid_mask; /* TID mask to flush */
+ uint16_t reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
+/**
+ * iwx_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline uint32_t iwx_get_scd_ssn(struct iwx_tx_resp *tx_resp)
+{
+ /*
+ * The response carries frame_count 32-bit status words; the SSN
+ * is the little-endian word following the last one. Only the low
+ * 12 bits hold the SSN.
+ */
+ return le32_to_cpup((uint32_t *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
+
+/**
+ * struct iwx_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token:
+ * @sta_id: station id
+ * @tid:
+ * @scd_queue: scheduler queue to configure
+ * @enable: 1 queue enable, 0 queue disable
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: %enum iwx_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ */
+struct iwx_scd_txq_cfg_cmd {
+ uint8_t token;
+ uint8_t sta_id; /* station id */
+ uint8_t tid;
+ uint8_t scd_queue; /* scheduler queue to configure */
+ uint8_t enable; /* 1: enable queue, 0: disable queue */
+ uint8_t aggregate; /* 1: aggregated queue, 0: otherwise */
+ uint8_t tx_fifo; /* IWX_TX_FIFO_* -- TODO confirm enum name */
+ uint8_t window; /* BA window size */
+ uint16_t ssn; /* SSN for the BA agreement */
+ uint16_t reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwx_scd_txq_cfg_rsp {
+ uint8_t token; /* taken from the command */
+ uint8_t sta_id; /* station id from the command */
+ uint8_t tid; /* tid from the command */
+ uint8_t scd_queue; /* scd_queue from the command */
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+
+/* Scan Commands, Responses, Notifications */
+
+/* Max number of IEs for direct SSID scans in a command */
+#define IWX_PROBE_OPTION_MAX 20
+
+/**
+ * struct iwx_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in IWX_REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwx_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwx_ssid_ie {
+ uint8_t id; /* information element ID */
+ uint8_t len; /* SSID length in bytes */
+ uint8_t ssid[IEEE80211_NWID_LEN];
+} __packed; /* IWX_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/* scan offload */
+#define IWX_SCAN_MAX_BLACKLIST_LEN 64
+#define IWX_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWX_SCAN_MAX_PROFILES 11
+#define IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWX_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWX_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWX_CAN_ABORT_STATUS 1
+
+#define IWX_FULL_SCAN_MULTIPLIER 5
+#define IWX_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWX_MAX_SCHED_SCAN_PLANS 2
+
+/**
+ * iwx_scan_schedule_lmac - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwx_scan_schedule_lmac {
+ uint16_t delay; /* delay between iterations, in seconds */
+ uint8_t iterations; /* number of scan iterations */
+ uint8_t full_scan_mul; /* number of partial scans before each full scan */
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+/**
+ * iwx_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_req_tx_cmd {
+ uint32_t tx_flags; /* combination of IWX_TX_CMD_FLG_* */
+ uint32_t rate_n_flags; /* rate for all attempts if STA_RATE flag is clear */
+ uint8_t sta_id; /* index of destination station in FW station table */
+ uint8_t reserved[3]; /* alignment / future use */
+} __packed;
+
+#define IWX_UNIFIED_SCAN_CHANNEL_FULL (1 << 27)
+#define IWX_UNIFIED_SCAN_CHANNEL_PARTIAL (1 << 28)
+
+/**
+ * iwx_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags: bits 1-20: directed scan to i'th ssid
+ * other bits &enum iwx_scan_channel_flags_lmac
+ * @channel_num: channel number 1-13 etc
+ * @iter_count: scan iteration on this channel
+ * @iter_interval: interval in seconds between iterations on one channel
+ */
+struct iwx_scan_channel_cfg_lmac {
+ uint32_t flags; /* bits 1-20: directed scan to i'th ssid; rest: flags */
+ uint16_t channel_num; /* channel number 1-13 etc */
+ uint16_t iter_count; /* scan iterations on this channel */
+ uint32_t iter_interval; /* seconds between iterations on one channel */
+} __packed;
+
+/*
+ * iwx_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwx_scan_probe_segment {
+ uint16_t offset; /* offset in the probe request data block */
+ uint16_t len; /* length of the segment */
+} __packed;
+
+/* iwx_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req_v1 {
+ struct iwx_scan_probe_segment mac_header; /* common head of the probe */
+ struct iwx_scan_probe_segment band_data[2]; /* per-band data */
+ struct iwx_scan_probe_segment common_data; /* common tail of the probe */
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE]; /* raw data block */
+} __packed;
+
+/* iwx_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req {
+ struct iwx_scan_probe_segment mac_header; /* common head of the probe */
+ struct iwx_scan_probe_segment band_data[3]; /* per-band data (v2: 3 bands) */
+ struct iwx_scan_probe_segment common_data; /* common tail of the probe */
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE]; /* raw data block */
+} __packed;
+
+
+#define IWX_SCAN_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_SCAN_CHANNEL_FLAG_EBS_ACCURATE (1 << 1)
+#define IWX_SCAN_CHANNEL_FLAG_CACHE_ADD (1 << 2)
+
+/* iwx_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwx_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be full scan(and so on).
+ */
+struct iwx_scan_channel_opt {
+ uint16_t flags; /* IWX_SCAN_CHANNEL_FLAG_* */
+ uint16_t non_ebs_ratio; /* 1: EBS disabled, 2: every 2nd scan is full, ... */
+} __packed;
+
+#define IWX_SCAN_PRIORITY_LOW 0
+#define IWX_SCAN_PRIORITY_MEDIUM 1
+#define IWX_SCAN_PRIORITY_HIGH 2
+
+/**
+ * iwx_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwx_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwx_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ */
+struct iwx_periodic_scan_complete {
+ uint8_t last_schedule_line; /* last schedule line executed */
+ uint8_t last_schedule_iteration; /* last iteration executed before abort */
+ uint8_t status; /* IWX_SCAN_OFFLOAD_COMPLETED/ABORTED */
+ uint8_t ebs_status; /* EBS success status */
+ uint32_t time_after_last_iter; /* seconds elapsed after last iteration */
+ uint32_t reserved;
+} __packed;
+
+/**
+ * struct iwx_scan_results_notif - scan results for one channel -
+ * SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: IWX_SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of request that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwx_scan_results_notif {
+ uint8_t channel; /* which channel the results are from */
+ uint8_t band; /* 0: 5.2 GHz, 1: 2.4 GHz */
+ uint8_t probe_status; /* success of probe request */
+ uint8_t num_probe_not_sent; /* requests not sent due to lack of time */
+ uint32_t duration; /* time spent on channel, in usecs */
+} __packed;
+
+#define IWX_SCAN_CLIENT_SCHED_SCAN (1 << 0)
+#define IWX_SCAN_CLIENT_NETDETECT (1 << 1)
+#define IWX_SCAN_CLIENT_ASSET_TRACKING (1 << 2)
+
+/**
+ * iwx_scan_offload_blacklist - IWX_SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
+ */
+struct iwx_scan_offload_blacklist {
+ uint8_t ssid[ETHER_ADDR_LEN]; /* MAC address to filter out (despite the name) */
+ uint8_t reported_rssi; /* AP rssi reported to the host */
+ uint8_t client_bitmap; /* IWX_SCAN_CLIENT_* clients ignoring this entry */
+} __packed;
+
+#define IWX_NETWORK_TYPE_BSS 1
+#define IWX_NETWORK_TYPE_IBSS 2
+#define IWX_NETWORK_TYPE_ANY 3
+
+#define IWX_SCAN_OFFLOAD_SELECT_2_4 0x4
+#define IWX_SCAN_OFFLOAD_SELECT_5_2 0x8
+#define IWX_SCAN_OFFLOAD_SELECT_ANY 0xc
+
+/**
+ * iwx_scan_offload_profile - IWX_SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwx_scan_offload_network_type
+ * @band_selection: enum iwx_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
+ */
+struct iwx_scan_offload_profile {
+ uint8_t ssid_index; /* index into the ssid list in the fixed part */
+ uint8_t unicast_cipher; /* encryption algorithm to match - bitmap */
+ uint8_t auth_alg; /* authentication algorithm to match - bitmap */
+ uint8_t network_type; /* IWX_NETWORK_TYPE_* */
+ uint8_t band_selection; /* IWX_SCAN_OFFLOAD_SELECT_* */
+ uint8_t client_bitmap; /* IWX_SCAN_CLIENT_* clients waiting for a match */
+ uint8_t reserved[2];
+} __packed;
+
+/**
+ * iwx_scan_offload_profile_cfg - IWX_SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter out from scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
+ */
+struct iwx_scan_offload_profile_cfg {
+ struct iwx_scan_offload_profile profiles[IWX_SCAN_MAX_PROFILES]; /* match list */
+ uint8_t blacklist_len; /* length of blacklist */
+ uint8_t num_profiles; /* number of profiles in the list */
+ uint8_t match_notify; /* clients waiting for match-found notification */
+ uint8_t pass_match; /* clients waiting for the results */
+ uint8_t active_clients; /* active clients bitmap (IWX_SCAN_CLIENT_*) */
+ uint8_t any_beacon_notify; /* clients wanting notification without match */
+ uint8_t reserved[2];
+} __packed;
+
+#define IWX_SCAN_OFFLOAD_COMPLETED 1
+#define IWX_SCAN_OFFLOAD_ABORTED 2
+
+/* UMAC Scan API */
+
+#define IWX_SCAN_CONFIG_FLAG_ACTIVATE (1 << 0)
+#define IWX_SCAN_CONFIG_FLAG_DEACTIVATE (1 << 1)
+#define IWX_SCAN_CONFIG_FLAG_FORBID_CHUB_REQS (1 << 2)
+#define IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS (1 << 3)
+#define IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS (1 << 8)
+#define IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS (1 << 9)
+#define IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID (1 << 10)
+#define IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES (1 << 11)
+#define IWX_SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES (1 << 12)
+#define IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS (1 << 13)
+#define IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES (1 << 14)
+#define IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR (1 << 15)
+#define IWX_SCAN_CONFIG_FLAG_SET_FRAGMENTED (1 << 16)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED (1 << 17)
+#define IWX_SCAN_CONFIG_FLAG_SET_CAM_MODE (1 << 18)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_CAM_MODE (1 << 19)
+#define IWX_SCAN_CONFIG_FLAG_SET_PROMISC_MODE (1 << 20)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE (1 << 21)
+
+/* Bits 26-31 are for num of channels in channel_array */
+#define IWX_SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+
+/* OFDM basic rates */
+#define IWX_SCAN_CONFIG_RATE_6M (1 << 0)
+#define IWX_SCAN_CONFIG_RATE_9M (1 << 1)
+#define IWX_SCAN_CONFIG_RATE_12M (1 << 2)
+#define IWX_SCAN_CONFIG_RATE_18M (1 << 3)
+#define IWX_SCAN_CONFIG_RATE_24M (1 << 4)
+#define IWX_SCAN_CONFIG_RATE_36M (1 << 5)
+#define IWX_SCAN_CONFIG_RATE_48M (1 << 6)
+#define IWX_SCAN_CONFIG_RATE_54M (1 << 7)
+/* CCK basic rates */
+#define IWX_SCAN_CONFIG_RATE_1M (1 << 8)
+#define IWX_SCAN_CONFIG_RATE_2M (1 << 9)
+#define IWX_SCAN_CONFIG_RATE_5M (1 << 10)
+#define IWX_SCAN_CONFIG_RATE_11M (1 << 11)
+
+/* Bits 16-27 are for supported rates */
+#define IWX_SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+
+#define IWX_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_CHANNEL_FLAG_ACCURATE_EBS (1 << 1)
+#define IWX_CHANNEL_FLAG_EBS_ADD (1 << 2)
+#define IWX_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE (1 << 3)
+
+/**
+ * struct iwx_scan_dwell
+ * @active: default dwell time for active scan
+ * @passive: default dwell time for passive scan
+ * @fragmented: default dwell time for fragmented scan
+ * @extended: default dwell time for channels 1, 6 and 11
+ */
+struct iwx_scan_dwell {
+ uint8_t active; /* default dwell time for active scan */
+ uint8_t passive; /* default dwell time for passive scan */
+ uint8_t fragmented; /* default dwell time for fragmented scan */
+ uint8_t extended; /* default dwell time for channels 1, 6 and 11 */
+} __packed;
+
+
+#define IWX_SCAN_TWO_LMACS 2
+#define IWX_SCAN_LB_LMAC_IDX 0 /* low-band */
+#define IWX_SCAN_HB_LMAC_IDX 1 /* high-band */
+
+/**
+ * struct iwx_scan_config
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell_active: default dwell time for active scan
+ * @dwell_passive: default dwell time for passive scan
+ * @dwell_fragmented: default dwell time for fragmented scan
+ * @dwell_extended: default dwell time for channels 1, 6 and 11
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwx_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwx_scan_config {
+ uint32_t flags; /* IWX_SCAN_CONFIG_FLAG_* */
+ uint32_t tx_chains; /* valid tx antennas - IWX_ANT_* */
+ uint32_t rx_chains; /* valid rx antennas - IWX_ANT_* */
+ uint32_t legacy_rates; /* default legacy rates - IWX_SCAN_CONFIG_RATE_* */
+ uint32_t out_of_channel_time[IWX_SCAN_TWO_LMACS]; /* max off-channel time */
+ uint32_t suspend_time[IWX_SCAN_TWO_LMACS]; /* default max suspend time */
+ struct iwx_scan_dwell dwell; /* default dwell times */
+ uint8_t mac_addr[ETHER_ADDR_LEN]; /* default mac address used in probes */
+ uint8_t bcast_sta_id; /* index of the broadcast station in the fw */
+ uint8_t channel_flags; /* IWX_CHANNEL_FLAG_* */
+ uint8_t channel_array[]; /* default supported channels (variable length) */
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * iwx_umac_scan_flags
+ *@IWX_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
+ *@IWX_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+#define IWX_UMAC_SCAN_FLAG_PREEMPTIVE (1 << 0)
+#define IWX_UMAC_SCAN_FLAG_START_NOTIF (1 << 1)
+
+#define IWX_UMAC_SCAN_UID_TYPE_OFFSET 0
+#define IWX_UMAC_SCAN_UID_SEQ_OFFSET 8
+
+#define IWX_UMAC_SCAN_GEN_FLAGS_PERIODIC (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS_OVER_BT (1 << 1)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL (1 << 2)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE (1 << 3)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT (1 << 4)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE (1 << 5)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID (1 << 6)
+#define IWX_UMAC_SCAN_GEN_FLAGS_FRAGMENTED (1 << 7)
+#define IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED (1 << 8)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MATCH (1 << 9)
+#define IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL (1 << 10)
+/* Extended dwell is obsolete when adaptive dwell is used, making this
+ * bit reusable. Hence, probe request defer is used only when adaptive
+ * dwell is supported. */
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP (1 << 10)
+#define IWX_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED (1 << 11)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL (1 << 13)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME (1 << 14)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE (1 << 15)
+
+/**
+ * UMAC scan general flags #2
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ * notification per channel or not.
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
+ */
+#define IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER (1 << 1)
+
+/**
+ * struct iwx_scan_channel_cfg_umac
+ * @flags: bitmap - 0-19: directed scan to i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwx_scan_channel_cfg_umac {
+ uint32_t flags;
+ uint8_t channel_num;
+ uint8_t iter_count;
+ uint16_t iter_interval;
+} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
+
+/**
+ * struct iwx_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_umac_schedule {
+ uint16_t interval;
+ uint8_t iter_count;
+ uint8_t reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwx_scan_req_umac_tail_v1 - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v1 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwx_scan_probe_req_v1 preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v2 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_2 */
+ struct iwx_scan_probe_req preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_umac_chan_param
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ */
+struct iwx_scan_umac_chan_param {
+ uint8_t flags;
+ uint8_t count;
+ uint16_t reserved;
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+
+#define IWX_SCAN_LB_LMAC_IDX 0
+#define IWX_SCAN_HB_LMAC_IDX 1
+
+/**
+ * struct iwx_scan_req_umac
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan per LMAC
+ * @passive_dwell: dwell time for passive scan per LMAC
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @general_flags2: &enum iwl_umac_scan_general_flags2
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ * are 2 LMACs (high band and low band)
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @num_of_fragments: Number of fragments needed for full coverage per band.
+ * Relevant only for fragmented scan.
+ * @channel: &struct iwx_scan_umac_chan_param
+ * @reserved: for future use and alignment
+ * @reserved3: for future use and alignment
+ * @data: &struct iwx_scan_channel_cfg_umac and
+ *	&struct iwx_scan_req_umac_tail_v1 or &struct iwx_scan_req_umac_tail_v2
+ */
+struct iwx_scan_req_umac {
+ uint32_t flags;
+ uint32_t uid;
+ uint32_t ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ uint16_t general_flags;
+ uint8_t reserved;
+ uint8_t scan_start_mac_id;
+ union {
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t reserved3;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t reserved2;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t adwell_default_hb_n_aps;
+ uint8_t adwell_default_lb_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */
+ };
+} __packed;
+
+#define IWX_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwx_scan_req_umac)
+#define IWX_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWX_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWX_SCAN_REQ_UMAC_SIZE_V1 36
+
+/**
+ * struct iwx_umac_scan_abort
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwx_umac_scan_abort {
+ uint32_t uid;
+ uint32_t flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwx_umac_scan_complete
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: scan status, see &enum iwx_scan_offload_complete_status
+ * @ebs_status: &enum iwx_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwx_umac_scan_complete {
+ uint32_t uid;
+ uint8_t last_schedule;
+ uint8_t last_iter;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_from_last_iter;
+ uint32_t reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwx_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @reserved: reserved for alignment
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ *	the channels passed in the scan offload request
+ */
+struct iwx_scan_offload_profile_match {
+ uint8_t bssid[ETHER_ADDR_LEN];
+ uint16_t reserved;
+ uint8_t channel;
+ uint8_t energy;
+ uint8_t matching_feature;
+ uint8_t matching_channels[IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwx_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwx_scan_offload_profiles_query {
+ uint32_t matched_profiles;
+ uint32_t last_scan_age;
+ uint32_t n_scans_done;
+ uint32_t gp2_d0u;
+ uint32_t gp2_invoked;
+ uint8_t resume_while_scanning;
+ uint8_t self_recovery;
+ uint16_t reserved;
+ struct iwx_scan_offload_profile_match matches[IWX_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
+/**
+ * struct iwx_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwx_umac_scan_iter_complete_notif {
+ uint32_t uid;
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwx_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_GSCAN_START_CMD 0x0
+#define IWX_GSCAN_STOP_CMD 0x1
+#define IWX_GSCAN_SET_HOTLIST_CMD 0x2
+#define IWX_GSCAN_RESET_HOTLIST_CMD 0x3
+#define IWX_GSCAN_SET_SIGNIFICANT_CHANGE_CMD 0x4
+#define IWX_GSCAN_RESET_SIGNIFICANT_CHANGE_CMD 0x5
+#define IWX_GSCAN_SIGNIFICANT_CHANGE_EVENT 0xFD
+#define IWX_GSCAN_HOTLIST_CHANGE_EVENT 0xFE
+#define IWX_GSCAN_RESULTS_AVAILABLE_EVENT 0xFF
+
+/* STA API */
+
+/**
+ * flags for the ADD_STA host command
+ * @IWX_STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @IWX_STA_FLG_REDUCED_TX_PWR_DATA:
+ * @IWX_STA_FLG_DISABLE_TX: set if TX should be disabled
+ * @IWX_STA_FLG_PS: set if STA is in Power Save
+ * @IWX_STA_FLG_INVALID: set if STA is invalid
+ * @IWX_STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @IWX_STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @IWX_STA_FLG_DRAIN_FLOW: drain flow
+ * @IWX_STA_FLG_PAN: STA is for PAN interface
+ * @IWX_STA_FLG_CLASS_AUTH:
+ * @IWX_STA_FLG_CLASS_ASSOC:
+ * @IWX_STA_FLG_CLASS_MIMO_PROT:
+ * @IWX_STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @IWX_STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @IWX_STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @IWX_STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @IWX_STA_FLG_MFP_EN: Management Frame Protection
+ */
+#define IWX_STA_FLG_REDUCED_TX_PWR_CTRL (1 << 3)
+#define IWX_STA_FLG_REDUCED_TX_PWR_DATA (1 << 6)
+
+#define IWX_STA_FLG_DISABLE_TX (1 << 4)
+
+#define IWX_STA_FLG_PS (1 << 8)
+#define IWX_STA_FLG_DRAIN_FLOW (1 << 12)
+#define IWX_STA_FLG_PAN (1 << 13)
+#define IWX_STA_FLG_CLASS_AUTH (1 << 14)
+#define IWX_STA_FLG_CLASS_ASSOC (1 << 15)
+#define IWX_STA_FLG_RTS_MIMO_PROT (1 << 17)
+
+#define IWX_STA_FLG_MAX_AGG_SIZE_SHIFT 19
+#define IWX_STA_FLG_MAX_AGG_SIZE_8K (0 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_16K (1 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_32K (2 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_64K (3 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_128K (4 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_256K (5 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_512K (6 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_1024K (7 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_MSK (7 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+
+#define IWX_STA_FLG_AGG_MPDU_DENS_SHIFT 23
+#define IWX_STA_FLG_AGG_MPDU_DENS_2US (4 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_4US (5 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_8US (6 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_16US (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_MSK (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+
+#define IWX_STA_FLG_FAT_EN_20MHZ (0 << 26)
+#define IWX_STA_FLG_FAT_EN_40MHZ (1 << 26)
+#define IWX_STA_FLG_FAT_EN_80MHZ (2 << 26)
+#define IWX_STA_FLG_FAT_EN_160MHZ (3 << 26)
+#define IWX_STA_FLG_FAT_EN_MSK (3 << 26)
+
+#define IWX_STA_FLG_MIMO_EN_SISO (0 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO2 (1 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO3 (2 << 28)
+#define IWX_STA_FLG_MIMO_EN_MSK (3 << 28)
+
+/**
+ * key flags for the ADD_STA host command
+ * @IWX_STA_KEY_FLG_NO_ENC: no encryption
+ * @IWX_STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @IWX_STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @IWX_STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @IWX_STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @IWX_STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @IWX_STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @IWX_STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @IWX_STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ * station info array (1 - n 1X mode)
+ * @IWX_STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @IWX_STA_KEY_NOT_VALID: key is invalid
+ * @IWX_STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @IWX_STA_KEY_MULTICAST: set for multicast key
+ * @IWX_STA_KEY_MFP: key is used for Management Frame Protection
+ */
+#define IWX_STA_KEY_FLG_NO_ENC (0 << 0)
+#define IWX_STA_KEY_FLG_WEP (1 << 0)
+#define IWX_STA_KEY_FLG_CCM (2 << 0)
+#define IWX_STA_KEY_FLG_TKIP (3 << 0)
+#define IWX_STA_KEY_FLG_EXT (4 << 0)
+#define IWX_STA_KEY_FLG_CMAC (6 << 0)
+#define IWX_STA_KEY_FLG_ENC_UNKNOWN (7 << 0)
+#define IWX_STA_KEY_FLG_EN_MSK (7 << 0)
+#define IWX_STA_KEY_FLG_WEP_KEY_MAP (1 << 3)
+#define IWX_STA_KEY_FLG_KEYID_POS 8
+#define IWX_STA_KEY_FLG_KEYID_MSK (3 << IWX_STA_KEY_FLG_KEYID_POS)
+#define IWX_STA_KEY_NOT_VALID (1 << 11)
+#define IWX_STA_KEY_FLG_WEP_13BYTES (1 << 12)
+#define IWX_STA_KEY_MULTICAST (1 << 14)
+#define IWX_STA_KEY_MFP (1 << 15)
+
+/**
+ * indicate to the fw what flag are being changed
+ * @IWX_STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
+ * @IWX_STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @IWX_STA_MODIFY_TX_RATE: unused
+ * @IWX_STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @IWX_STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @IWX_STA_MODIFY_PROT_TH:
+ * @IWX_STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+#define IWX_STA_MODIFY_QUEUE_REMOVAL (1 << 0)
+#define IWX_STA_MODIFY_TID_DISABLE_TX (1 << 1)
+#define IWX_STA_MODIFY_TX_RATE (1 << 2)
+#define IWX_STA_MODIFY_ADD_BA_TID (1 << 3)
+#define IWX_STA_MODIFY_REMOVE_BA_TID (1 << 4)
+#define IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT (1 << 5)
+#define IWX_STA_MODIFY_PROT_TH (1 << 6)
+#define IWX_STA_MODIFY_QUEUES (1 << 7)
+
+#define IWX_STA_MODE_MODIFY 1
+
+/**
+ * type of sleep of the station
+ * @IWX_STA_SLEEP_STATE_AWAKE:
+ * @IWX_STA_SLEEP_STATE_PS_POLL:
+ * @IWX_STA_SLEEP_STATE_UAPSD:
+ * @IWX_STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
+ */
+#define IWX_STA_SLEEP_STATE_AWAKE 0
+#define IWX_STA_SLEEP_STATE_PS_POLL (1 << 0)
+#define IWX_STA_SLEEP_STATE_UAPSD (1 << 1)
+#define IWX_STA_SLEEP_STATE_MOREDATA (1 << 2)
+
+/* STA ID and color bits definitions */
+#define IWX_STA_ID_SEED (0x0f)
+#define IWX_STA_ID_POS (0)
+#define IWX_STA_ID_MSK (IWX_STA_ID_SEED << IWX_STA_ID_POS)
+
+#define IWX_STA_COLOR_SEED (0x7)
+#define IWX_STA_COLOR_POS (4)
+#define IWX_STA_COLOR_MSK (IWX_STA_COLOR_SEED << IWX_STA_COLOR_POS)
+
+#define IWX_STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & IWX_STA_COLOR_MSK) >> IWX_STA_COLOR_POS)
+#define IWX_STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & IWX_STA_ID_MSK) >> IWX_STA_ID_POS)
+
+#define IWX_STA_KEY_MAX_NUM (16)
+#define IWX_STA_KEY_IDX_INVALID (0xff)
+#define IWX_STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWX_MAX_GLOBAL_KEYS (4)
+#define IWX_STA_KEY_LEN_WEP40 (5)
+#define IWX_STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwx_keyinfo - key information
+ * @key_flags: type %iwx_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwx_keyinfo {
+ uint16_t key_flags;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved1;
+ uint16_t tkip_rx_ttak[5];
+ uint8_t key_offset;
+ uint8_t reserved2;
+ uint8_t key[16];
+ uint64_t tx_secur_seq_cnt;
+ uint64_t hw_tkip_mic_rx_key;
+ uint64_t hw_tkip_mic_tx_key;
+} __packed;
+
+#define IWX_ADD_STA_STATUS_MASK 0xFF
+#define IWX_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWX_ADD_STA_BAID_MASK 0x7F00
+#define IWX_ADD_STA_BAID_SHIFT 8
+
+/**
+ * struct iwx_add_sta_cmd - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obsolete for new TX API (9 and above).
+ * @rx_ba_window: aggregation window size
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwx_add_sta_cmd {
+ uint8_t add_modify;
+ uint8_t awake_acs;
+ uint16_t tid_disable_tx;
+ uint32_t mac_id_n_color;
+ uint8_t addr[ETHER_ADDR_LEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ uint16_t reserved2;
+ uint8_t sta_id;
+ uint8_t modify_mask;
+ uint16_t reserved3;
+ uint32_t station_flags;
+ uint32_t station_flags_msk;
+ uint8_t add_immediate_ba_tid;
+ uint8_t remove_immediate_ba_tid;
+ uint16_t add_immediate_ba_ssn;
+ uint16_t sleep_tx_count;
+ uint8_t sleep_state_flags;
+ uint8_t station_type;
+ uint16_t assoc_id;
+ uint16_t beamform_flags;
+ uint32_t tfd_queue_msk;
+ uint16_t rx_ba_window;
+ uint8_t sp_length;
+ uint8_t uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
+
+/**
+ * FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWX_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWX_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWX_STA_MULTICAST: multicast traffic,
+ * @IWX_STA_TDLS_LINK: TDLS link station
+ * @IWX_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+#define IWX_STA_LINK 0
+#define IWX_STA_GENERAL_PURPOSE 1
+#define IWX_STA_MULTICAST 2
+#define IWX_STA_TDLS_LINK 3
+#define IWX_STA_AUX_ACTIVITY 4
+
+/**
+ * struct iwx_add_sta_key_cmd - add/modify sta key
+ * ( IWX_REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwx_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwx_add_sta_key_cmd {
+ uint8_t sta_id;
+ uint8_t key_offset;
+ uint16_t key_flags;
+ uint8_t key[16];
+ uint8_t key2[16];
+ uint8_t rx_secur_seq_cnt[16];
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved;
+ uint16_t tkip_rx_ttak[5];
+} __packed; /* IWX_ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * status in the response to ADD_STA command
+ * @IWX_ADD_STA_SUCCESS: operation was executed successfully
+ * @IWX_ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @IWX_ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @IWX_ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station
+ * that doesn't exist.
+ */
+#define IWX_ADD_STA_SUCCESS 0x1
+#define IWX_ADD_STA_STATIONS_OVERLOAD 0x2
+#define IWX_ADD_STA_IMMEDIATE_BA_FAILURE 0x4
+#define IWX_ADD_STA_MODIFY_NON_EXISTING_STA 0x8
+
+/**
+ * struct iwx_rm_sta_cmd - Remove a station from the fw's station table
+ * ( IWX_REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwx_rm_sta_cmd {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed; /* IWX_REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_mgmt_mcast_key_cmd
+ * ( IWX_MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwx_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that support IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwx_mgmt_mcast_key_cmd {
+ uint32_t ctrl_flags;
+ uint8_t IGTK[16];
+ uint8_t K1[16];
+ uint8_t K2[16];
+ uint32_t key_id;
+ uint32_t sta_id;
+ uint64_t receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/*
+ * A single WEP key entry, carried in the variable-length tail of
+ * struct iwx_wep_key_cmd.
+ * NOTE(review): key_size is presumably the key length in bytes
+ * (5 for WEP40, 13 for WEP104, matching IWX_STA_KEY_LEN_*) -- confirm
+ * against the callers that build this command.
+ */
+struct iwx_wep_key {
+	uint8_t key_index;
+	uint8_t key_offset;	/* key offset in the fw's key table (cf. iwx_keyinfo) */
+	uint16_t reserved1;
+	uint8_t key_size;
+	uint8_t reserved2[3];
+	uint8_t key[16];
+} __packed;
+
+struct iwx_wep_key_cmd {
+ uint32_t mac_id_n_color;
+ uint8_t num_keys;
+ uint8_t decryption_type;
+ uint8_t flags;
+ uint8_t reserved;
+ struct iwx_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+/*
+ * BT coex
+ */
+
+#define IWX_BT_COEX_DISABLE 0x0
+#define IWX_BT_COEX_NW 0x1
+#define IWX_BT_COEX_BT 0x2
+#define IWX_BT_COEX_WIFI 0x3
+/* BT_COEX_MODES_E */
+
+#define IWX_BT_COEX_MPLUT_ENABLED (1 << 0)
+#define IWX_BT_COEX_MPLUT_BOOST_ENABLED (1 << 1)
+#define IWX_BT_COEX_SYNC2SCO_ENABLED (1 << 2)
+#define IWX_BT_COEX_CORUN_ENABLED (1 << 3)
+#define IWX_BT_COEX_HIGH_BAND_RET (1 << 4)
+/* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+/**
+ * struct iwx_bt_coex_cmd - bt coex configuration command
+ * @mode: enum %iwx_bt_coex_mode
+ * @enabled_modules: enum %iwx_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwx_bt_coex_cmd {
+ uint32_t mode;
+ uint32_t enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+
+/*
+ * Location Aware Regulatory (LAR) API - MCC updates
+ */
+
+/**
+ * struct iwx_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwx_mcc_source
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwx_mcc_update_cmd {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved;
+ uint32_t key;
+ uint32_t reserved2[5];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
+
/**
 * iwx_mcc_update_resp - response to MCC_UPDATE_CMD.
 * Contains the new channel control profile map, if changed, and the new MCC
 * (mobile country code).
 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
 * @status: see &enum iwx_mcc_update_status
 * @mcc: the new applied MCC
 * @cap: capabilities for all channels which matches the MCC
 * @source_id: the MCC source, see iwx_mcc_source
 * @time: time elapsed from the MCC test start (in 30 seconds TU)
 * @reserved: reserved.
 * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
 *	channels, depending on platform)
 * @channels: channel control data map, DWORD for each channel. Only the first
 *	16bits are used.
 */
struct iwx_mcc_update_resp {
	uint32_t status;
	uint16_t mcc;
	uint8_t cap;
	uint8_t source_id;
	uint16_t time;
	uint16_t reserved;
	uint32_t n_channels;
	/* C99 flexible array member instead of GNU zero-length array,
	 * consistent with the other variable-length structs in this file. */
	uint32_t channels[];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */
+
+#define IWX_MCC_SOURCE_OLD_FW 0
+#define IWX_MCC_SOURCE_ME 1
+#define IWX_MCC_SOURCE_BIOS 2
+#define IWX_MCC_SOURCE_3G_LTE_HOST 3
+#define IWX_MCC_SOURCE_3G_LTE_DEVICE 4
+#define IWX_MCC_SOURCE_WIFI 5
+#define IWX_MCC_SOURCE_RESERVED 6
+#define IWX_MCC_SOURCE_DEFAULT 7
+#define IWX_MCC_SOURCE_UNINITIALIZED 8
+#define IWX_MCC_SOURCE_MCC_API 9
+#define IWX_MCC_SOURCE_GET_CURRENT 0x10
+#define IWX_MCC_SOURCE_GETTING_MCC_TEST_MODE 0x11
+
+/*
+ * From Linux commit ab02165ccec4c78162501acedeef1a768acdb811:
+ * As the firmware is slowly running out of command IDs and grouping of
+ * commands is desirable anyway, the firmware is extending the command
+ * header from 4 bytes to 8 bytes to introduce a group (in place of the
+ * former flags field, since that's always 0 on commands and thus can
+ * be easily used to distinguish between the two).
+ *
+ * These functions retrieve specific information from the id field in
+ * the iwx_host_cmd struct which contains the command id, the group id,
+ * and the version of the command.
+*/
/* Return the opcode, stored in the lowest byte of a wide command id. */
static inline uint8_t
iwx_cmd_opcode(uint32_t cmdid)
{
	return (uint8_t)(cmdid & 0xff);
}
+
/*
 * Return the group id, stored in bits 8-15 of a wide command id.
 * See iwx_cmd_id() for how the wide id is composed.
 */
static inline uint8_t
iwx_cmd_groupid(uint32_t cmdid)
{
	/* was 0Xff00; lowercase hex prefix for consistency with this file */
	return ((cmdid & 0xff00) >> 8);
}
+
/* Return the command version, stored in bits 16-23 of a wide command id. */
static inline uint8_t
iwx_cmd_version(uint32_t cmdid)
{
	return (cmdid >> 16) & 0xff;
}
+
/*
 * Compose a wide command id from an opcode (bits 0-7), a group id
 * (bits 8-15) and a command version (bits 16-23).
 */
static inline uint32_t
iwx_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	return (uint32_t)opcode | ((uint32_t)groupid << 8) |
	    ((uint32_t)version << 16);
}
+
+/* make uint16_t wide id out of uint8_t group and opcode */
+#define IWX_WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+
+struct iwx_cmd_header {
+ uint8_t code;
+ uint8_t flags;
+ uint8_t idx;
+ uint8_t qid;
+} __packed;
+
+struct iwx_cmd_header_wide {
+ uint8_t opcode;
+ uint8_t group_id;
+ uint8_t idx;
+ uint8_t qid;
+ uint16_t length;
+ uint8_t reserved;
+ uint8_t version;
+} __packed;
+
+#define IWX_POWER_SCHEME_CAM 1
+#define IWX_POWER_SCHEME_BPS 2
+#define IWX_POWER_SCHEME_LP 3
+
+#define IWX_DEF_CMD_PAYLOAD_SIZE 320
+#define IWX_MAX_CMD_PAYLOAD_SIZE ((4096 - 4) - sizeof(struct iwx_cmd_header))
+#define IWX_CMD_FAILED_MSK 0x40
+
+/**
+ * struct iwx_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwx_device_cmd {
+ union {
+ struct {
+ struct iwx_cmd_header hdr;
+ uint8_t data[IWX_DEF_CMD_PAYLOAD_SIZE];
+ };
+ struct {
+ struct iwx_cmd_header_wide hdr_wide;
+ uint8_t data_wide[IWX_DEF_CMD_PAYLOAD_SIZE -
+ sizeof(struct iwx_cmd_header_wide) +
+ sizeof(struct iwx_cmd_header)];
+ };
+ };
+} __packed;
+
+struct iwx_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-26: Reserved
+ * 25: Offload enabled
+ * 24: RPF enabled
+ * 23: RSS enabled
+ * 22: Checksum enabled
+ * 21-16: RX queue
+ * 15-14: Reserved
+ * 13-00: RX frame size
+ */
+ uint32_t len_n_flags;
+ struct iwx_cmd_header hdr;
+ uint8_t data[];
+} __packed;
+
+#define IWX_FH_RSCSR_FRAME_SIZE_MSK 0x00003fff
+#define IWX_FH_RSCSR_FRAME_INVALID 0x55550000
+#define IWX_FH_RSCSR_FRAME_ALIGN 0x40
+#define IWX_FH_RSCSR_RPA_EN (1 << 25)
+#define IWX_FH_RSCSR_RXQ_POS 16
+#define IWX_FH_RSCSR_RXQ_MASK 0x3F0000
+
+static uint32_t
+iwx_rx_packet_len(const struct iwx_rx_packet *pkt)
+{
+
+	/* Bits 13-0 of len_n_flags hold the RX frame size (see struct
+	 * iwx_rx_packet); the field is little-endian on the wire. */
+	return le32toh(pkt->len_n_flags) & IWX_FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static uint32_t
+iwx_rx_packet_payload_len(const struct iwx_rx_packet *pkt)
+{
+
+	/* Payload length is the total frame length minus the command
+	 * header that precedes the data bytes. */
+	return iwx_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
+
+
+#define IWX_MIN_DBM -100
+#define IWX_MAX_DBM -33 /* realistic guess */
+
+#define IWX_READ_8(sc, reg) \
+ bus_space_read_8((sc)->sc_st, (sc)->sc_sh, (reg))
+
+#define IWX_WRITE_8(sc, reg, val) \
+ bus_space_write_8((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_READ(sc, reg) \
+ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
+
+#define IWX_WRITE(sc, reg, val) \
+ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_WRITE_1(sc, reg, val) \
+ bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_SETBITS(sc, reg, mask) \
+ IWX_WRITE(sc, reg, IWX_READ(sc, reg) | (mask))
+
+#define IWX_CLRBITS(sc, reg, mask) \
+ IWX_WRITE(sc, reg, IWX_READ(sc, reg) & ~(mask))
+
+#define IWX_BARRIER_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_WRITE)
+
+#define IWX_BARRIER_READ_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
diff --git a/sys/dev/pci/if_iwxvar.h b/sys/dev/pci/if_iwxvar.h
new file mode 100644
index 00000000000..fb6f0c62e68
--- /dev/null
+++ b/sys/dev/pci/if_iwxvar.h
@@ -0,0 +1,537 @@
+/* $OpenBSD: if_iwxvar.h,v 1.1 2020/02/15 08:47:14 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014 genua mbh <info@genua.de>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Radiotap header prepended to received frames handed to bpf(4)
+ * listeners.  Layout is fixed by the radiotap spec; fields must appear
+ * in the order of their IEEE80211_RADIOTAP_* bit numbers.
+ */
+struct iwx_rx_radiotap_header {
+	struct ieee80211_radiotap_header wr_ihdr;
+	uint64_t wr_tsft;		/* device timestamp */
+	uint8_t wr_flags;
+	uint8_t wr_rate;
+	uint16_t wr_chan_freq;
+	uint16_t wr_chan_flags;
+	int8_t wr_dbm_antsignal;
+	int8_t wr_dbm_antnoise;
+} __packed;
+
+/* Bitmap of radiotap fields present in iwx_rx_radiotap_header. */
+#define IWX_RX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
+
+/* Radiotap header prepended to transmitted frames for bpf(4) capture. */
+struct iwx_tx_radiotap_header {
+	struct ieee80211_radiotap_header wt_ihdr;
+	uint8_t wt_flags;
+	uint8_t wt_rate;
+	uint16_t wt_chan_freq;
+	uint16_t wt_chan_flags;
+	uint8_t wt_hwqueue;
+} __packed;
+
+/* Bitmap of radiotap fields present in iwx_tx_radiotap_header. */
+#define IWX_TX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_HWQUEUE))
+
+/* Maximum number of sections a firmware image may be split into. */
+#define IWX_UCODE_SECT_MAX 39
+#define IWX_FWDMASEGSZ (192*1024)
+/*
+ * NOTE(review): the _8000 variant appears to be carried over from
+ * iwm(4); confirm whether it is referenced by this driver.
+ */
+#define IWX_FWDMASEGSZ_8000 (320*1024)
+/* sanity check value */
+#define IWX_FWMAXSIZE (2*1024*1024)
+
+/*
+ * fw_status is used to determine if we've already parsed the firmware file
+ *
+ * In addition to the following, status < 0 ==> -error
+ */
+#define IWX_FW_STATUS_NONE 0
+#define IWX_FW_STATUS_INPROGRESS 1
+#define IWX_FW_STATUS_DONE 2
+
+/* Firmware image variants; indexes into iwx_fw_info's fw_sects[]. */
+enum iwx_ucode_type {
+	IWX_UCODE_TYPE_REGULAR,
+	IWX_UCODE_TYPE_INIT,
+	IWX_UCODE_TYPE_WOW,		/* wake-on-wireless */
+	IWX_UCODE_TYPE_REGULAR_USNIFFER,
+	IWX_UCODE_TYPE_MAX
+};
+
+/*
+ * State describing the firmware file loaded via firmload(9): the raw
+ * image, its parsed per-type sections, and debug TLV data extracted
+ * for driver usage.
+ */
+struct iwx_fw_info {
+	void *fw_rawdata;	/* raw firmware file contents */
+	size_t fw_rawsize;
+	int fw_status;		/* IWX_FW_STATUS_* */
+
+	/* Parsed sections, one set per ucode type. */
+	struct iwx_fw_sects {
+		struct iwx_fw_onesect {
+			void *fws_data;		/* points into fw_rawdata */
+			uint32_t fws_len;
+			uint32_t fws_devoff;	/* load offset on the device */
+		} fw_sect[IWX_UCODE_SECT_MAX];
+		size_t fw_totlen;
+		int fw_count;		/* number of valid fw_sect entries */
+	} fw_sects[IWX_UCODE_TYPE_MAX];
+
+	/* FW debug data parsed for driver usage */
+	int dbg_dest_tlv_init;
+	uint8_t *dbg_dest_ver;
+	uint8_t n_dest_reg;
+	struct iwx_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+
+	struct iwx_fw_dbg_conf_tlv *dbg_conf_tlv[IWX_FW_DBG_CONF_MAX];
+	size_t dbg_conf_tlv_len[IWX_FW_DBG_CONF_MAX];
+	struct iwx_fw_dbg_trigger_tlv *dbg_trigger_tlv[IWX_FW_DBG_TRIGGER_MAX];
+	size_t dbg_trigger_tlv_len[IWX_FW_DBG_TRIGGER_MAX];
+	struct iwx_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_mem_tlv;
+};
+
+/*
+ * Data parsed from the device's non-volatile memory (NVM): MAC
+ * address, SKU capability bits, radio configuration and calibration
+ * values.
+ */
+struct iwx_nvm_data {
+	int n_hw_addrs;
+	uint8_t hw_addr[ETHER_ADDR_LEN];	/* factory MAC address */
+
+	uint8_t calib_version;
+	uint16_t calib_voltage;
+
+	uint16_t raw_temperature;
+	uint16_t kelvin_temperature;
+	uint16_t kelvin_voltage;
+	uint16_t xtal_calib[2];		/* crystal calibration values */
+
+	/* SKU capability flags. */
+	int sku_cap_band_24GHz_enable;
+	int sku_cap_band_52GHz_enable;
+	int sku_cap_11n_enable;
+	int sku_cap_amt_enable;
+	int sku_cap_ipan_enable;
+	int sku_cap_mimo_disable;
+
+	/* Radio configuration as read from NVM. */
+	uint8_t radio_cfg_type;
+	uint8_t radio_cfg_step;
+	uint8_t radio_cfg_dash;
+	uint8_t radio_cfg_pnum;
+	uint8_t valid_tx_ant, valid_rx_ant;	/* antenna bitmasks */
+
+	uint16_t nvm_version;
+	uint8_t max_tx_pwr_half_dbm;
+};
+
+/* max bufs per tfd the driver will use */
+#define IWX_MAX_CMD_TBS_PER_TFD 2
+
+/*
+ * A host command to be sent to the firmware, built from up to
+ * IWX_MAX_CMD_TBS_PER_TFD payload fragments.  Response fields are
+ * filled in when the caller asks for a synchronous reply.
+ */
+struct iwx_host_cmd {
+	const void *data[IWX_MAX_CMD_TBS_PER_TFD];	/* payload fragments */
+	struct iwx_rx_packet *resp_pkt;		/* response, if requested */
+	size_t resp_pkt_len;
+	unsigned long _rx_page_addr;
+	uint32_t _rx_page_order;
+	int handler_status;
+
+	uint32_t flags;		/* IWX_CMD_* (enum IWX_CMD_MODE) */
+	uint16_t len[IWX_MAX_CMD_TBS_PER_TFD];		/* fragment lengths */
+	uint8_t dataflags[IWX_MAX_CMD_TBS_PER_TFD];	/* IWX_HCMD_DFL_* */
+	uint32_t id;		/* command id */
+};
+
+/*
+ * DMA glue is from iwn
+ */
+
+/* A single bus_dma(9) allocation: map, physical and virtual address. */
+struct iwx_dma_info {
+	bus_dma_tag_t tag;
+	bus_dmamap_t map;
+	bus_dma_segment_t seg;
+	bus_addr_t paddr;	/* physical (device-visible) address */
+	void *vaddr;		/* kernel virtual address */
+	bus_size_t size;
+};
+
+#define IWX_TX_RING_COUNT IWX_DEFAULT_QUEUE_SIZE
+/* Low/high watermarks on the number of queued TX descriptors. */
+#define IWX_TX_RING_LOMARK 192
+#define IWX_TX_RING_HIMARK 224
+
+/* Per-slot TX state: DMA map and the mbuf/node owning the frame. */
+struct iwx_tx_data {
+	bus_dmamap_t map;
+	bus_addr_t cmd_paddr;
+	struct mbuf *m;
+	struct iwx_node *in;
+	int done;
+};
+
+/* One TX queue: descriptor/command DMA memory plus per-slot state. */
+struct iwx_tx_ring {
+	struct iwx_dma_info desc_dma;
+	struct iwx_dma_info cmd_dma;
+	struct iwx_dma_info bc_tbl;	/* byte count table */
+	struct iwx_tfh_tfd *desc;
+	struct iwx_device_cmd *cmd;
+	struct iwx_tx_data data[IWX_TX_RING_COUNT];
+	int qid;	/* firmware queue id */
+	int queued;	/* number of descriptors in flight */
+	int cur;
+	int tail;
+};
+
+#define IWX_RX_MQ_RING_COUNT 512
+/* Linux driver optionally uses 8k buffer */
+#define IWX_RBUF_SIZE 4096
+
+/* Per-slot RX state: receive mbuf and its DMA map. */
+struct iwx_rx_data {
+	struct mbuf *m;
+	bus_dmamap_t map;
+};
+
+/* The (multi-queue style) RX ring: free/used descriptor tables and buffers. */
+struct iwx_rx_ring {
+	struct iwx_dma_info free_desc_dma;
+	struct iwx_dma_info stat_dma;
+	struct iwx_dma_info used_desc_dma;
+	struct iwx_dma_info buf_dma;
+	void *desc;
+	struct iwx_rb_status *stat;
+	struct iwx_rx_data data[IWX_RX_MQ_RING_COUNT];
+	int cur;
+};
+
+/* Bits for iwx_softc's sc_flags field. */
+#define IWX_FLAG_USE_ICT 0x01 /* using Interrupt Cause Table */
+#define IWX_FLAG_RFKILL 0x02 /* radio kill switch is set */
+#define IWX_FLAG_SCANNING 0x04 /* scan in progress */
+#define IWX_FLAG_MAC_ACTIVE 0x08 /* MAC context added to firmware */
+#define IWX_FLAG_BINDING_ACTIVE 0x10 /* MAC->PHY binding added to firmware */
+#define IWX_FLAG_STA_ACTIVE 0x20 /* AP added to firmware station table */
+#define IWX_FLAG_TE_ACTIVE 0x40 /* time event is scheduled */
+#define IWX_FLAG_HW_ERR 0x80 /* hardware error occurred */
+#define IWX_FLAG_SHUTDOWN 0x100 /* shutting down; new tasks forbidden */
+#define IWX_FLAG_BGSCAN 0x200 /* background scan in progress */
+
+/* Microcode (firmware) load/run status and error table locations. */
+struct iwx_ucode_status {
+	uint32_t uc_lmac_error_event_table[2];
+	uint32_t uc_umac_error_event_table;
+	uint32_t uc_log_event_table;
+	unsigned int error_event_table_tlv_status;	/* IWX_ERROR_EVENT_TABLE_* */
+
+	int uc_ok;	/* firmware reported successful start */
+	int uc_intr;	/* "alive" interrupt was received */
+};
+
+/* Bits for error_event_table_tlv_status above. */
+#define IWX_ERROR_EVENT_TABLE_LMAC1 (1 << 0)
+#define IWX_ERROR_EVENT_TABLE_LMAC2 (1 << 1)
+#define IWX_ERROR_EVENT_TABLE_UMAC (1 << 2)
+
+/* Maximum size of a buffered command response. */
+#define IWX_CMD_RESP_MAX PAGE_SIZE
+
+/*
+ * lower blocks contain EEPROM image and calibration data
+ * NOTE(review): the FAMILY_7000/8000 names look carried over from
+ * iwm(4); confirm whether this driver uses them.
+ */
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_7000 16384
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_8000 32768
+
+/* Bounds (ms) for firmware time-event session protection. */
+#define IWX_TE_SESSION_PROTECTION_MAX_TIME_MS 1000
+#define IWX_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/* Flags for iwx_host_cmd's flags field. */
+enum IWX_CMD_MODE {
+	IWX_CMD_ASYNC = (1 << 0),	/* don't wait for completion */
+	IWX_CMD_WANT_RESP = (1 << 1),	/* caller wants the response packet */
+	IWX_CMD_SEND_IN_RFKILL = (1 << 2),
+};
+/* Flags for iwx_host_cmd's dataflags field. */
+enum iwx_hcmd_dataflag {
+	IWX_HCMD_DFL_NOCOPY = (1 << 0),	/* reference payload, don't copy */
+	IWX_HCMD_DFL_DUP = (1 << 1),
+};
+
+#define IWX_NUM_PAPD_CH_GROUPS 9
+#define IWX_NUM_TXP_CH_GROUPS 9
+
+/* A firmware PHY context binding an id/color pair to a channel. */
+struct iwx_phy_ctxt {
+	uint16_t id;
+	uint16_t color;
+	uint32_t ref;		/* reference count */
+	struct ieee80211_channel *channel;
+};
+
+/* Beacon filtering state. */
+struct iwx_bf_data {
+	int bf_enabled; /* filtering */
+	int ba_enabled; /* abort */
+	int ave_beacon_signal;
+	int last_cqm_event;
+};
+
+/**
+ * struct iwx_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwx_self_init_dram {
+	struct iwx_dma_info *fw;
+	int fw_cnt;
+	struct iwx_dma_info *paging;
+	int paging_cnt;
+};
+
+/*
+ * Per-device driver state ("softc").  Combines net80211 state (sc_ic),
+ * bus/DMA resources, TX/RX rings, firmware bookkeeping and radiotap
+ * capture state for one iwx(4) device.
+ */
+struct iwx_softc {
+	struct device sc_dev;
+	struct ieee80211com sc_ic;
+	/* Saved net80211 newstate handler; the driver interposes its own. */
+	int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);
+	int sc_newstate_pending;
+
+	struct ieee80211_amrr sc_amrr;	/* AMRR TX rate adaptation state */
+	struct timeout sc_calib_to;	/* periodic calibration timeout */
+
+	struct task init_task; /* NB: not reference-counted */
+	struct refcnt task_refs;
+	struct task newstate_task;
+	enum ieee80211_state ns_nstate;	/* argument for newstate_task */
+	int ns_arg;
+
+	/* Task for firmware BlockAck setup/teardown and its arguments. */
+	struct task ba_task;
+	int ba_start;
+	int ba_tid;
+	uint16_t ba_ssn;
+	uint16_t ba_winsize;
+
+	/* Task for HT protection updates. */
+	struct task htprot_task;
+
+	/* Bus glue: register window, DMA tag, PCI handles. */
+	bus_space_tag_t sc_st;
+	bus_space_handle_t sc_sh;
+	bus_size_t sc_sz;
+	bus_dma_tag_t sc_dmat;
+	pci_chipset_tag_t sc_pct;
+	pcitag_t sc_pcitag;
+	const void *sc_ih;	/* interrupt handler cookie */
+	int sc_msix;
+
+	/* TX scheduler rings. */
+	struct iwx_dma_info sched_dma;
+	uint32_t sched_base;
+
+	/* TX/RX rings. */
+	struct iwx_tx_ring txq[IWX_MAX_QUEUES];
+	struct iwx_rx_ring rxq;
+	int qfullmsk;	/* bitmask of full TX queues */
+
+	int sc_sf_state;	/* smart FIFO state */
+
+	/* ICT table. */
+	struct iwx_dma_info ict_dma;
+	int ict_cur;
+
+	int sc_hw_rev;		/* hardware (silicon) revision */
+#define IWX_SILICON_A_STEP 0
+#define IWX_SILICON_B_STEP 1
+#define IWX_SILICON_C_STEP 2
+#define IWX_SILICON_D_STEP 3
+	int sc_hw_id;
+	int sc_device_family;
+#define IWX_DEVICE_FAMILY_22000 1
+#define IWX_DEVICE_FAMILY_22560 2
+
+	struct iwx_dma_info fw_dma;	/* DMA memory for firmware upload */
+
+	struct iwx_dma_info ctxt_info_dma;	/* context info for self-init */
+	struct iwx_self_init_dram init_dram;
+
+	int sc_fw_chunk_done;
+	int sc_init_complete;	/* IWX_INIT_COMPLETE | IWX_CALIB_COMPLETE */
+#define IWX_INIT_COMPLETE 0x01
+#define IWX_CALIB_COMPLETE 0x02
+
+	struct iwx_ucode_status sc_uc;
+	char sc_fwver[32];	/* firmware version string */
+
+	/* Firmware-reported capabilities and supported API bits. */
+	int sc_capaflags;
+	int sc_capa_max_probe_len;
+	int sc_capa_n_scan_channels;
+	uint8_t sc_ucode_api[howmany(IWX_NUM_UCODE_TLV_API, NBBY)];
+	uint8_t sc_enabled_capa[howmany(IWX_NUM_UCODE_TLV_CAPA, NBBY)];
+#define IWX_MAX_FW_CMD_VERSIONS 64
+	struct iwx_fw_cmd_version cmd_versions[IWX_MAX_FW_CMD_VERSIONS];
+	int n_cmd_versions;
+
+	int sc_intmask;		/* enabled interrupt causes */
+	int sc_flags;		/* IWX_FLAG_* */
+
+	/* Interrupt cause masks for MSI-X operation. */
+	uint32_t sc_fh_init_mask;
+	uint32_t sc_hw_init_mask;
+	uint32_t sc_fh_mask;
+	uint32_t sc_hw_mask;
+
+	/* Bumped on reset; lets stale tasks detect they should bail out. */
+	int sc_generation;
+
+	struct rwlock ioctl_rwl;	/* serializes ioctl handling */
+
+	int sc_cap_off; /* PCIe caps */
+
+	/* Firmware image selection and parsed firmware state. */
+	const char *sc_fwname;
+	bus_size_t sc_fwdmasegsz;
+	size_t sc_nvm_max_section_size;
+	struct iwx_fw_info sc_fw;
+	struct iwx_dma_info fw_mon;	/* firmware debug monitor buffer */
+	int sc_fw_phy_config;
+	struct iwx_tlv_calib_ctrl sc_default_calib[IWX_UCODE_TYPE_MAX];
+
+	struct iwx_nvm_data sc_nvm;	/* parsed NVM contents */
+	struct iwx_bf_data sc_bf;	/* beacon filtering state */
+
+	int sc_tx_timer;	/* TX watchdog countdown */
+	int sc_rx_ba_sessions;
+
+	int sc_scan_last_antenna;
+
+	int sc_fixed_ridx;	/* fixed TX rate index, if configured */
+
+	int sc_staid;
+	int sc_nodecolor;
+
+	/* Buffered synchronous command responses, indexed by TX slot. */
+	uint8_t *sc_cmd_resp_pkt[IWX_TX_RING_COUNT];
+	size_t sc_cmd_resp_len[IWX_TX_RING_COUNT];
+	int sc_nic_locks;	/* nesting count of NIC access grants */
+
+	struct taskq *sc_nswq;	/* task queue for newstate processing */
+
+	struct iwx_rx_phy_info sc_last_phy_info;
+	int sc_ampdu_ref;
+
+	uint32_t sc_time_event_uid;
+
+	/* phy contexts. we only use the first one */
+	struct iwx_phy_ctxt sc_phyctxt[IWX_NUM_PHY_CTX];
+
+	struct iwx_notif_statistics sc_stats;	/* firmware statistics */
+	int sc_noise;
+
+	int sc_ltr_enabled;
+	enum iwx_nvm_type nvm_type;
+
+	int sc_integrated;
+	int sc_tx_with_siso_diversity;
+	int sc_max_tfd_queue_size;
+
+#if NBPFILTER > 0
+	caddr_t sc_drvbpf;
+
+	/* Radiotap capture headers, padded to the maximum header length. */
+	union {
+		struct iwx_rx_radiotap_header th;
+		uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+	} sc_rxtapu;
+#define sc_rxtap sc_rxtapu.th
+	int sc_rxtap_len;
+
+	union {
+		struct iwx_tx_radiotap_header th;
+		uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+	} sc_txtapu;
+#define sc_txtap sc_txtapu.th
+	int sc_txtap_len;
+#endif
+};
+
+/*
+ * Driver extension of net80211's node structure; in_ni must be the
+ * first member so iwx_node and ieee80211_node pointers are
+ * interchangeable.
+ */
+struct iwx_node {
+	struct ieee80211_node in_ni;
+	struct iwx_phy_ctxt *in_phyctxt;
+
+	/* Firmware station id/color for this node. */
+	uint16_t in_id;
+	uint16_t in_color;
+
+	/* Per-node TX rate adaptation state (AMRR and MiRA). */
+	struct ieee80211_amrr_node in_amn;
+	struct ieee80211_mira_node in_mn;
+
+	/* Set in 11n mode if we don't receive ACKs for OFDM frames. */
+	int ht_force_cck;
+
+};
+/* Fixed firmware station table slots. */
+#define IWX_STATION_ID 0
+#define IWX_AUX_STA_ID 1
+
+/* Interrupt Cause Table geometry. */
+#define IWX_ICT_SIZE 4096
+#define IWX_ICT_COUNT (IWX_ICT_SIZE / sizeof (uint32_t))
+#define IWX_ICT_PADDR_SHIFT 12	/* table must be 4KB-aligned */