Diffstat (limited to 'drivers/net/wireless/ath/ath11k/wmi.c')
-rw-r--r-- drivers/net/wireless/ath/ath11k/wmi.c | 1462
1 file changed, 1342 insertions(+), 120 deletions(-)
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6b68ccf65e39..fad9f8d308a2 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -128,8 +129,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
- [WMI_TAG_RFKILL_EVENT] = {
- .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -144,6 +143,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -388,6 +389,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
ab->target_pdev_count++;
+ if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return -EINVAL;
+
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
* handled.
@@ -395,7 +400,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
- } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
@@ -405,13 +412,11 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
pdev_cap->nss_ratio_info =
WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
- } else {
- return -EINVAL;
}
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
- * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+ * mac and the remaining 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
@@ -526,8 +531,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
- ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
-
return 0;
}
@@ -618,10 +621,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
return skb;
}
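+/* Return the remain-on-channel frequency for frames sent off-channel
+ * during a RoC scan, or 0 otherwise.
+ */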
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params.support_off_channel_tx &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_mgmt_send_cmd *cmd;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
@@ -642,7 +660,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->desc_id = buf_id;
- cmd->chanfreq = 0;
+ cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->frame_len = frame->len;
@@ -973,9 +991,13 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -989,6 +1011,17 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
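+	/* For a station interface associated to a nontransmitted BSSID
+	 * profile, also pass the transmitting BSSID and the profile
+	 * index/count of the MBSSID set to firmware.
+	 */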
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->trans_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->profile_idx = bss_conf->bssid_index;
+ cmd->profile_num = bss_conf->bssid_indicator;
+ }
+ }
+
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
@@ -1678,7 +1711,7 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- if (vif->csa_active) {
+ if (vif->bss_conf.csa_active) {
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
}
@@ -2013,7 +2046,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
{
/* setup commonly used values */
arg->scan_req_id = 1;
- arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
@@ -3043,8 +3079,34 @@ int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
return ret;
}
-int
-ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
+
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
@@ -3062,34 +3124,29 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
- cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
- cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
- cmd->congestion_thresh_teardown =
- ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
- cmd->congestion_thresh_critical =
- ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
- cmd->interference_thresh_teardown =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
- cmd->interference_thresh_setup =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
- cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
- cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
- cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
- cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
- cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
- cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
- cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
- cmd->remove_sta_slot_interval =
- ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
- /* TODO add MBSSID support */
- cmd->mbss_support = 0;
-
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_ENABLE_CMDID);
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 1;
}
return ret;
}
@@ -3114,11 +3171,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_DISABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 0;
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delete twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi pause twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
}
return ret;
}
@@ -3626,7 +3853,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -5083,6 +5311,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar)
break;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_RUNNING;
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
complete(&ar->scan.started);
break;
}
@@ -5165,6 +5395,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
break;
}
}
@@ -5613,9 +5845,9 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
arvif->bssid,
NULL);
if (!sta) {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
- ret = -EPROTO;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
goto exit;
}
@@ -5713,8 +5945,9 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
"wmi stats vdev id %d snr %d\n",
src->vdev_id, src->beacon_snr);
} else {
- ath11k_warn(ab, "not found station for bssid %pM\n",
- arvif->bssid);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for vdev stat\n",
+ arvif->bssid);
}
}
@@ -6177,8 +6410,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
const void **tb;
- int ret;
+ int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -6204,6 +6439,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
kfree(tb);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
@@ -6353,7 +6595,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
fallback:
/* Fallback to older reg (by sending previous country setting
- * again if fw has succeded and we failed to process here.
+ * again if fw has succeeded and we failed to process here.
* The Regdomain should be uniform across driver and fw. Since the
* FW has processed the command and sent a success status, we expect
* this function to succeed as well. If it doesn't, CTRY needs to be
@@ -6560,6 +6802,107 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
}
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+		   "peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
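+	/* Track cumulative power save duration only when firmware supports
+	 * the peer power save duration service and reports valid state and
+	 * timestamp information.
+	 */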
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
@@ -7112,47 +7455,64 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
rcu_read_unlock();
}
-static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
{
- const void **tb;
const struct wmi_service_available_event *ev;
- int ret;
+ u32 *wmi_ext2_service_bitmap;
int i, j;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
+ switch (tag) {
+ case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+ ev = (struct wmi_service_available_event *)ptr;
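+		/* Walk each 32-bit segment word and set the corresponding
+		 * extended service bits in the service map.
+		 */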
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
- if (!ev) {
- ath11k_warn(ab, "failed to fetch svc available ev");
- kfree(tb);
- return;
- }
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ ev->wmi_service_segment_bitmap[0],
+ ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2],
+ ev->wmi_service_segment_bitmap[3]);
+ break;
+ case WMI_TAG_ARRAY_UINT32:
+ wmi_ext2_service_bitmap = (u32 *)ptr;
+ for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i++) {
+ do {
+ if (wmi_ext2_service_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
- /* TODO: Use wmi_service_segment_offset information to get the service
- * especially when more services are advertised in multiple sevice
- * available events.
- */
- for (i = 0, j = WMI_MAX_SERVICE;
- i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
- i++) {
- do {
- if (ev->wmi_service_segment_bitmap[i] &
- BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
- set_bit(j, ab->wmi_ab.svc_map);
- } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+			   "wmi_ext2_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+ wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+ break;
}
+ return 0;
+}
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
- ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
- ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ int ret;
- kfree(tb);
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_services_parser,
+ NULL);
+ if (ret)
+ ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
}
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -7185,7 +7545,53 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debugfs_fw_stats_process(ab, skb);
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ * debugfs fw stats. Therefore, processing it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+ * are currently requested only via debugfs fw stats. Hence, processing these
+ * in debugfs context
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_free(&stats);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -7248,7 +7654,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
continue;
}
- if (arvif->is_up && arvif->vif->csa_active)
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif);
}
rcu_read_unlock();
@@ -7338,40 +7744,6 @@ exit:
kfree(tb);
}
-static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
- const struct wmi_rfkill_state_change_ev *ev;
- const void **tb;
- int ret;
-
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
-
- ev = tb[WMI_TAG_RFKILL_EVENT];
- if (!ev) {
- kfree(tb);
- return;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
- ev->gpio_pin_num,
- ev->int_type,
- ev->radio_state);
-
- spin_lock_bh(&ab->base_lock);
- ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
- spin_unlock_bh(&ab->base_lock);
-
- queue_work(ab->workqueue, &ab->rfkill_work);
- kfree(tb);
-}
-
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7532,6 +7904,116 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath11k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
+ ev->refresh_cnt);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+ NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
+ replay_ctr = ev->replay_ctr.word1;
+ replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+ arvif->rekey_data.replay_ctr = replay_ctr;
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7629,11 +8111,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
+ case WMI_TWT_DEL_DIALOG_EVENTID:
+ case WMI_TWT_PAUSE_DIALOG_EVENTID:
+ case WMI_TWT_RESUME_DIALOG_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
@@ -7651,12 +8139,15 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
- case WMI_RFKILL_STATE_CHANGE_EVENTID:
- ath11k_rfkill_state_change_event(ab, skb);
- break;
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath11k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7798,6 +8289,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear current config to be used for next user config */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;
@@ -7851,7 +8395,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
@@ -7873,6 +8417,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
ath11k_wmi_free_dbring_caps(ab);
}
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable;
+
+ /* Set all modes in case of disable */
+ if (cmd->enable)
+ cmd->hw_filter_bitmap = filter_bitmap;
+ else
+ cmd->hw_filter_bitmap = ((u32)~0U);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ enable, filter_bitmap);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
struct wmi_wow_host_wakeup_ind *cmd;
@@ -7943,3 +8520,648 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->is_add = enable;
+ cmd->event_bitmap = (1 << event);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_ADD_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+ bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_BITMAP_PATTERN_T) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+ bitmap->pattern_offset = pattern_offset;
+ bitmap->pattern_len = pattern_len;
+ bitmap->bitmask_len = pattern_len;
+ bitmap->pattern_id = pattern_id;
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_DEL_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 *channel_list;
+ size_t len, nlo_list_len, channel_list_len;
+ u8 *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV place holder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV place holder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = pno->vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = pno->active_max_time;
+ cmd->passive_dwell_time = pno->passive_max_time;
+
+ if (pno->do_passive_scan)
+ cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+ cmd->fast_scan_period = pno->fast_scan_period;
+ cmd->slow_scan_period = pno->slow_scan_period;
+ cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+ cmd->delay_start_time = pno->delay_start_time;
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+ ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = pno->uc_networks_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = (struct nlo_configured_parameters *)ptr;
+ for (i = 0; i < cmd->no_of_ssids; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+ nlo_list[i].ssid.valid = true;
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ nlo_list[i].ssid.ssid.ssid_len);
+ ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+ roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = true;
+ nlo_list[i].rssi_cond.rssi =
+ pno->a_networks[i].rssi_threshold;
+ }
+
+ nlo_list[i].bcast_nw_type.valid = true;
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ pno->a_networks[i].bcast_nw_type;
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = pno->a_networks[0].channel_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = (u32 *)ptr;
+ for (i = 0; i < cmd->num_of_channels; i++)
+ channel_list[i] = pno->a_networks[0].channels[i];
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_STOP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_tuple *ns;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = (struct wmi_tlv *)buf_ptr;
+
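+	/* NS tuples beyond WMI_MAX_NS_OFFLOADS are carried in a second,
+	 * extended TLV array; 'ext' selects which range is filled here.
+	 */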
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+ ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+ ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+ ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+ ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+ if (ns->target_mac.word0 != 0 ||
+ ns->target_mac.word1 != 0) {
+ ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_tuple *arp;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = WMI_ARPOL_FLAGS_VALID;
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+ ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct ath11k_arp_ns_offload *offload;
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ offload = &arvif->arp_ns_offload;
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+ }
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->flags = 0;
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+ buf_ptr += sizeof(*cmd);
+
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+ ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+ int len;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+
+ if (enable) {
+ cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+ /* the length in rekey_data and cmd is equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+ ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+ } else {
+ cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ int len;
+ struct sk_buff *skb;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{
+	struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, sar_len_aligned, rsvd_len_aligned;
+
+ sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sar_len_aligned +
+ TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->sar_len = BIOS_SAR_TABLE_LEN;
+ cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, rsvd_len_aligned;
+
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->enabled = arg->enabled;
+ cmd->interval = arg->interval;
+ cmd->method = arg->method;
+
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}