Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c	114
1 file changed, 48 insertions(+), 66 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a9ac872226fd..3e92a117c0b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1899,11 +1899,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
-	if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
-	    mvmvif->lqm_active)
-		iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
-				     0, 0);
-
 	/*
 	 * If we're not associated yet, take the (new) BSSID before associating
 	 * so the firmware knows. If we're already associated, then use the old
@@ -3432,10 +3427,24 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
 		return;
 	mutex_lock(&mvm->mutex);
+
+	/* we are only changing the min_width, may be a noop */
+	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
+		if (phy_ctxt->width == ctx->min_def.width)
+			goto out_unlock;
+
+		/* we are just toggling between 20_NOHT and 20 */
+		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
+		    ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+			goto out_unlock;
+	}
+
 	iwl_mvm_bt_coex_vif_change(mvm);
 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
 				 ctx->rx_chains_static,
 				 ctx->rx_chains_dynamic);
+
+out_unlock:
 	mutex_unlock(&mvm->mutex);
 }
@@ -3900,11 +3909,6 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
 		break;
 	case NL80211_IFTYPE_STATION:
-		if (mvmvif->lqm_active)
-			iwl_mvm_send_lqm_cmd(vif,
-					     LQM_CMD_OPERATION_STOP_MEASUREMENT,
-					     0, 0);
-
 		/* Schedule the time event to a bit before beacon 1,
 		 * to make sure we're in the new channel when the
 		 * GO/AP arrives. In case count <= 1 immediately schedule the
@@ -3998,39 +4002,36 @@ out_unlock:
 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
 {
-	if (drop) {
-		if (iwl_mvm_has_new_tx_api(mvm))
-			/* TODO new tx api */
-			WARN_ONCE(1,
-				  "Need to implement flush TX queue\n");
-		else
-			iwl_mvm_flush_tx_path(mvm,
-				iwl_mvm_flushable_queues(mvm) & queues,
-				0);
-	} else {
-		if (iwl_mvm_has_new_tx_api(mvm)) {
-			struct ieee80211_sta *sta;
-			int i;
+	int i;
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		if (drop) {
 			mutex_lock(&mvm->mutex);
-
-			for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-				sta = rcu_dereference_protected(
-						mvm->fw_id_to_mac_id[i],
-						lockdep_is_held(&mvm->mutex));
-				if (IS_ERR_OR_NULL(sta))
-					continue;
-
-				iwl_mvm_wait_sta_queues_empty(mvm,
-					iwl_mvm_sta_from_mac80211(sta));
-			}
-
+			iwl_mvm_flush_tx_path(mvm,
+				iwl_mvm_flushable_queues(mvm) & queues, 0);
 			mutex_unlock(&mvm->mutex);
 		} else {
-			iwl_trans_wait_tx_queues_empty(mvm->trans,
-							queues);
+			iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
 		}
+		return;
+	}
+
+	mutex_lock(&mvm->mutex);
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		struct ieee80211_sta *sta;
+
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		if (drop)
+			iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+		else
+			iwl_mvm_wait_sta_queues_empty(mvm,
+					iwl_mvm_sta_from_mac80211(sta));
 	}
+	mutex_unlock(&mvm->mutex);
 }
 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
@@ -4259,31 +4260,6 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
 				event->u.ba.ssn);
 }
-static void
-iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
-				     struct ieee80211_vif *vif,
-				     const struct ieee80211_event *event)
-{
-	struct iwl_fw_dbg_trigger_tlv *trig;
-	struct iwl_fw_dbg_trigger_ba *ba_trig;
-
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
-		return;
-
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
-	ba_trig = (void *)trig->data;
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
-	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
-		return;
-
-	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
-				"Frame from %pM timed out, tid %d",
-				event->u.ba.sta->addr, event->u.ba.tid);
-}
-
 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       const struct ieee80211_event *event)
@@ -4298,7 +4274,8 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
 		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
 		break;
 	case BA_FRAME_TIMEOUT:
-		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+		iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
+						     event->u.ba.tid);
 		break;
 	default:
 		break;
@@ -4314,9 +4291,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvm->mutex);
-	/* TODO - remove a000 disablement when we have RXQ config API */
-	if (!iwl_mvm_has_new_rx_api(mvm) ||
-	    mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+	if (!iwl_mvm_has_new_rx_api(mvm))
 		return;
 	notif->cookie = mvm->queue_sync_cookie;
@@ -4325,6 +4300,13 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 		atomic_set(&mvm->queue_sync_counter,
 			   mvm->trans->num_rx_queues);
+	/* TODO - remove this when we have RXQ config API */
+	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+		qmask = BIT(0);
+		if (notif->sync)
+			atomic_set(&mvm->queue_sync_counter, 1);
+	}
+
 	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);