Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c  162
1 file changed, 104 insertions(+), 58 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 70261387d1a5..621019f43531 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -705,6 +705,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
.pdev_bss_chan_info_request_cmdid =
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -780,6 +781,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -855,6 +857,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +932,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1008,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
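Each firmware branch carries its own vdev param map, translating the driver's generic vdev parameters into firmware-specific IDs; entries left at WMI_VDEV_PARAM_UNSUPPORTED (as in the main and 10.x maps above) are rejected before any command is built. A minimal sketch of how such a map is typically consulted, assuming a hypothetical caller on top of the existing ath10k_wmi_vdev_set_param() helper:

	/* Illustrative only, not part of this patch: gate a TSF adjustment
	 * on the per-firmware vdev param map before issuing the command.
	 */
	static int example_vdev_adjust_tsf(struct ath10k *ar, u32 vdev_id,
					   u32 tsf_increment)
	{
		u32 param_id = ar->wmi.vdev_param->set_tsf;

		if (param_id == WMI_VDEV_PARAM_UNSUPPORTED)
			return -EOPNOTSUPP; /* firmware has no TSF param */

		return ath10k_wmi_vdev_set_param(ar, vdev_id, param_id,
						 tsf_increment);
	}
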
@@ -1803,7 +1808,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ret = -ESHUTDOWN;
(ret != -EAGAIN);
- }), 3*HZ);
+ }), 3 * HZ);
if (ret)
dev_kfree_skb_any(skb);
@@ -2099,34 +2104,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum ieee80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = IEEE80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = IEEE80211_BAND_2GHZ;
- }
-
- return band;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -2167,10 +2144,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
size_t pull_len;
u32 msdu_len;
+ u32 len;
- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
ev_hdr = &ev_v2->hdr.v1;
pull_len = sizeof(*ev_v2);
@@ -2195,6 +2175,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
* trailer with credit update. Trim the excess garbage.
*/
@@ -2211,6 +2197,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
size_t pull_len;
u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
ev_hdr = &ev->hdr;
@@ -2231,6 +2219,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
/* Make sure bytes added for padding are removed. */
skb_trim(skb, msdu_len);
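When WMI_RX_STATUS_EXT_INFO is set, the firmware appends an extension record after the management payload, which it pads to a 4-byte boundary; that is why both pull handlers above compute the offset with ALIGN(buf_len, 4) before copying the record into arg->ext_info. A sketch of the assumed buffer layout and of the record contents relied on here (the authoritative definition lives in wmi.h):

	/* Assumed on-wire layout behind the ALIGN() arithmetic above:
	 *
	 *   | rx hdr | mgmt frame (buf_len bytes) | pad to 4 | ext info |
	 *
	 * The extension carries at least the RX MAC timestamp that the
	 * mgmt rx handler later reports as mactime.
	 */
	struct wmi_mgmt_rx_ext_info {
		__le64 rx_mac_timestamp;
	} __packed;
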
@@ -2281,14 +2276,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
* of mgmt rx.
*/
if (channel >= 1 && channel <= 14) {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
} else if (channel >= 36 && channel <= 165) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
@@ -2298,7 +2298,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
sband = &ar->mac.sbands[status->band];
@@ -2310,6 +2310,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
ath10k_wmi_handle_wep_reauth(ar, skb, status);
/* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2357,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -2612,6 +2618,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
@@ -2865,11 +2881,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
const struct wmi_10_2_4_ext_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
int stats_len;
- bool ext_peer_stats_support;
- ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
- ar->wmi.svc_map);
- if (ext_peer_stats_support)
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
else
stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2899,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
- if (ext_peer_stats_support)
+ if (ath10k_peer_stats_enabled(ar))
dst->rx_duration = __le32_to_cpu(src->rx_duration);
/* FIXME: expose 10.2 specific values */
@@ -2905,6 +2918,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
+ u32 stats_id;
int i;
if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2928,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2968,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_4_peer_stats *src;
+ const struct wmi_10_4_peer_extd_stats *src;
struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+ bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
+
+ if (extd_peer_stats)
+ stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
+ else
+ stats_len = sizeof(struct wmi_10_4_peer_stats);
src = (void *)skb->data;
- if (!skb_pull(skb, sizeof(*src)))
+ if (!skb_pull(skb, stats_len))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
- ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
- dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
- dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
- dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
/* FIXME: expose 10.4 specific values */
+ if (extd_peer_stats)
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
list_add_tail(&dst->list, &stats->peers);
}
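For 10.4 the per-peer record size now depends on stats_id: when the firmware sets WMI_10_4_STAT_PEER_EXTD, each record uses the larger extended format, which embeds the base wmi_10_4_peer_stats as its first member, so the common fields can be pulled through ath10k_wmi_10_4_pull_peer_stats() and only rx_duration is read from the extension. A rough sketch of the assumed relationship between the two formats (field order and any further extended fields are assumptions; see wmi.h for the real definition):

	/* Illustrative layout only: the extended record starts with the
	 * plain 10.4 peer stats, followed by extra fields including the
	 * rx_duration consumed above.
	 */
	struct example_10_4_peer_extd_stats {
		struct wmi_10_4_peer_stats common;
		/* ... other 10.4-specific extended fields ... */
		__le32 rx_duration;
	} __packed;
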
@@ -4584,10 +4605,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
- /* only manually set fw features when not using FW IE format */
- if (ar->fw_api == 1 && ar->fw_version_build > 636)
- set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, ar->max_spatial_stream);
@@ -4617,10 +4634,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
}
if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
ar->max_num_vdevs;
- ar->num_active_peers = ar->hw_params.qcache_active_peers +
- ar->max_num_vdevs;
ar->num_tids = ar->num_active_peers * 2;
ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
}
@@ -5517,7 +5540,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+
+ if (ath10k_peer_stats_enabled(ar)) {
config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
} else {
@@ -5579,7 +5603,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ if (ath10k_peer_stats_enabled(ar))
features |= WMI_10_2_PEER_STATS;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
@@ -5800,9 +5824,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
for (i = 0; i < arg->n_bssids; i++)
- memcpy(&bssids->bssid_list[i],
- arg->bssids[i].bssid,
- ETH_ALEN);
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
ptr += sizeof(*bssids);
ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -7484,6 +7507,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
return -ENOTSUPP;
}
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
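The new generator only builds the wmi_ext_resource_config_10_4_cmd payload; actually sending it is left to the common wmi-ops plumbing, which pairs the .ext_resource_config op added in the next hunk with the ext_resource_cfg_cmdid entry added to the 10.4 command map at the top of this patch. A minimal sketch of what the caller-side wrapper is expected to look like (an assumption for illustration; the real wrapper belongs to the wmi-ops layer, not this file):

	static int example_ext_resource_config(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap)
	{
		struct sk_buff *skb;

		if (!ar->wmi.ops->ext_resource_config)
			return -EOPNOTSUPP;

		skb = ar->wmi.ops->ext_resource_config(ar, type,
						       fw_feature_bitmap);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		return ath10k_wmi_cmd_send(ar, skb,
					   ar->wmi.cmd->ext_resource_cfg_cmdid);
	}
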
@@ -7810,6 +7855,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
@@ -7819,7 +7865,7 @@ static const struct wmi_ops wmi_10_4_ops = {
int ath10k_wmi_attach(struct ath10k *ar)
{
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->wmi.ops = &wmi_10_4_ops;
ar->wmi.cmd = &wmi_10_4_cmd_map;
@@ -7861,7 +7907,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
ath10k_err(ar, "unsupported WMI op version: %d\n",
- ar->wmi.op_version);
+ ar->running_fw->fw_file.wmi_op_version);
return -EINVAL;
}