 drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 187 ++++++++++++++++++--------
 1 file changed, 142 insertions(+), 45 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 3712adc3ccc2..1ad621d13ad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -263,19 +263,26 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, __le16 fc)
{
- int rate_idx;
+ int rate_idx = -1;
u8 rate_plcp;
u32 rate_flags = 0;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- /* HT rate doesn't make sense for a non data frame */
- WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
- "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
- info->control.rates[0].flags,
- info->control.rates[0].idx);
+ /* info->control is only relevant for non HW rate control */
+ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ /* HT rate doesn't make sense for a non data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_data(fc),
+ "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx,
+ le16_to_cpu(fc), mvmsta->sta_state);
+
+ rate_idx = info->control.rates[0].idx;
+ }
- rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(
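
The hunk above only trusts info->control.rates[0] when mac80211's software rate control filled it in; with device rate control (HAS_RATE_CONTROL) the field carries no valid data, so rate_idx stays -1 and the legacy fallback kicks in. A minimal standalone sketch of that fallback shape, with illustrative names and table size (not the driver's; the real code asks rate_lowest_index() for the station's lowest supported rate, where the sketch hard-codes index 0):

#include <stdio.h>

#define RATE_COUNT_LEGACY 12            /* hypothetical legacy table size */

static int pick_rate_idx(int sw_idx, int sw_rate_control)
{
        int rate_idx = -1;

        /* only software rate control populates the index */
        if (sw_rate_control)
                rate_idx = sw_idx;

        /* unknown or out-of-range: fall back to the lowest legacy rate */
        if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY)
                rate_idx = 0;

        return rate_idx;
}

int main(void)
{
        printf("%d\n", pick_rate_idx(7, 1));    /* 7: index trusted */
        printf("%d\n", pick_rate_idx(7, 0));    /* 0: hw rate control */
        return 0;
}
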
@@ -305,7 +312,7 @@ static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
- return iwl_mvm_get_tx_rate(mvm, info, sta) |
+ return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}
@@ -1324,12 +1331,24 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
- u32 status)
+ u32 status, __le16 frame_control)
{
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_tx_status *status_trig;
int i;
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
+ enum iwl_fw_ini_time_point tp =
+ IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+ if (ieee80211_is_action(frame_control))
+ tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ tp, NULL);
+ return;
+ }
+
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
FW_DBG_TRIGGER_TX_STATUS);
if (!trig)
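
The new early return fires a firmware debug time point on any unsuccessful TX status, picking a more specific point for 802.11 action frames. The status word packs several fields, and TX_STATUS_MSK isolates the status code before the comparison. A standalone sketch of that test, with illustrative mask and code values rather than the firmware's:

#include <stdint.h>
#include <stdio.h>

#define STATUS_MSK      0xff            /* illustrative field mask */
#define STATUS_SUCCESS  0x01            /* illustrative success code */

enum time_point { TP_TX_FAILED, TP_TX_ACTION_FRAME_FAILED };

static int classify(uint32_t status, int is_action, enum time_point *tp)
{
        if ((status & STATUS_MSK) == STATUS_SUCCESS)
                return 0;               /* nothing to report */

        *tp = is_action ? TP_TX_ACTION_FRAME_FAILED : TP_TX_FAILED;
        return 1;
}

int main(void)
{
        enum time_point tp;

        if (classify(0x42, 0, &tp))     /* failed non-action frame */
                printf("time point %d\n", tp);
        return 0;
}
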
@@ -1447,7 +1466,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (skb_freed > 1)
info->flags |= IEEE80211_TX_STAT_ACK;
- iwl_mvm_tx_status_check_trigger(mvm, status);
+ iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
info->status.rates[0].count = tx_resp->failure_frame + 1;
iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
@@ -1631,10 +1650,13 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
+ bool trigger_timepoint = false;
for (i = 0; i < tx_resp->frame_count; i++) {
u16 fstatus = le16_to_cpu(frame_status[i].status);
-
+ /* If at least one frame wasn't transmitted, trigger a time point */
+ trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
+ AGG_TX_STATE_TRANSMITTED);
IWL_DEBUG_TX_REPLY(mvm,
"status %s (0x%04x), try-count (%d) seq (0x%x)\n",
iwl_get_agg_tx_status(fstatus),
@@ -1643,6 +1665,11 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
AGG_TX_STATE_TRY_CNT_POS,
le16_to_cpu(frame_status[i].sequence));
}
+
+ if (trigger_timepoint)
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
+
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
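
The loop above deliberately accumulates the failure flag with |= instead of breaking out, so every frame in the aggregation still gets its status printed, and the debug time point fires at most once afterwards. The same pattern in a standalone sketch, with illustrative state constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATE_STATUS_MSK        0x0fff  /* illustrative */
#define STATE_TRANSMITTED       0x0001  /* illustrative */

static void scan_agg_status(const uint16_t *fstatus, int n)
{
        bool trigger = false;
        int i;

        for (i = 0; i < n; i++) {
                /* keep scanning so every frame is still logged */
                trigger |= (fstatus[i] & STATE_STATUS_MSK) !=
                           STATE_TRANSMITTED;
                printf("frame %d status 0x%04x\n", i, fstatus[i]);
        }

        if (trigger)
                puts("fire TX_FAILED time point once");
}

int main(void)
{
        uint16_t st[] = { 0x0001, 0x0004, 0x0001 };

        scan_agg_status(st, 3);
        return 0;
}
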
@@ -1704,7 +1731,8 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int txq, int index,
- struct ieee80211_tx_info *ba_info, u32 rate)
+ struct ieee80211_tx_info *tx_info, u32 rate,
+ bool is_flush)
{
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data = NULL;
@@ -1747,7 +1775,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
* frames because before failing a frame the firmware transmits
* it without aggregation at least once.
*/
- info->flags |= IEEE80211_TX_STAT_ACK;
+ if (!is_flush)
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
/*
@@ -1766,7 +1795,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
- "invalid BA notification: Q %d, tid %d\n",
+ "invalid reclaim request: Q %d, tid %d\n",
tid_data->txq_id, tid);
rcu_read_unlock();
return;
@@ -1781,26 +1810,28 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
freed = 0;
/* pack lq color from tid_data along the reduced txp */
- ba_info->status.status_driver_data[0] =
+ tx_info->status.status_driver_data[0] =
RS_DRV_DATA_PACK(tid_data->lq_color,
- ba_info->status.status_driver_data[0]);
- ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+ tx_info->status.status_driver_data[0]);
+ tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (ieee80211_is_data_qos(hdr->frame_control))
- freed++;
- else
- WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+ if (!is_flush) {
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+ }
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
if (freed == 1) {
info->flags |= IEEE80211_TX_STAT_AMPDU;
- memcpy(&info->status, &ba_info->status,
- sizeof(ba_info->status));
+ memcpy(&info->status, &tx_info->status,
+ sizeof(tx_info->status));
iwl_mvm_hwrate_to_tx_status(rate, info);
}
}
@@ -1811,7 +1842,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
* possible (i.e. first MPDU in the aggregation wasn't acked)
* Still it's important to update RS about sent vs. acked.
*/
- if (skb_queue_empty(&reclaimed_skbs)) {
+ if (!is_flush && skb_queue_empty(&reclaimed_skbs)) {
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
@@ -1821,13 +1852,13 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
- ba_info->band = chanctx_conf->def.chan->band;
- iwl_mvm_hwrate_to_tx_status(rate, ba_info);
+ tx_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, tx_info);
if (!iwl_mvm_has_tlc_offload(mvm)) {
IWL_DEBUG_TX_REPLY(mvm,
"No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
}
}
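
Threading is_flush through iwl_mvm_tx_reclaim lets one function serve two callers with opposite semantics: a block-ack notification means the peer acknowledged the frames, while a flush means they were dropped unsent, so only the former may set IEEE80211_TX_STAT_ACK or feed the rate-scaling statistics. A compressed sketch of that split, with a stand-in flag value:

#include <stdbool.h>
#include <stdint.h>

#define STAT_ACK 0x1                    /* stand-in for IEEE80211_TX_STAT_ACK */

static uint32_t reclaim_flags(bool is_flush)
{
        uint32_t flags = 0;

        if (!is_flush)                  /* BA: frames really made it out */
                flags |= STAT_ACK;

        return flags;                   /* flush: leave frames unacked */
}

int main(void)
{
        return reclaim_flags(true);     /* 0: flushed frames carry no ACK */
}
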
@@ -1843,6 +1874,7 @@ out:
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
int sta_id, tid, txq, index;
struct ieee80211_tx_info ba_info = {};
struct iwl_mvm_ba_notif *ba_notif;
@@ -1855,8 +1887,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+ u16 tfd_cnt;
int i;
+ if (unlikely(sizeof(*ba_res) > pkt_len))
+ return;
+
sta_id = ba_res->sta_id;
ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
@@ -1865,8 +1901,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
- if (!le16_to_cpu(ba_res->tfd_cnt))
- goto out;
+ tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+ if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len)
+ return;
rcu_read_lock();
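
Both new checks guard a variable-length firmware notification: first the fixed header must fit in the payload, then the header plus the claimed tfd array. In the kernel, struct_size(ba_res, tfd, tfd_cnt) computes sizeof(*ba_res) + tfd_cnt * sizeof(ba_res->tfd[0]) with overflow saturation. A userspace sketch of the same validation, with illustrative types and a plain (non-saturating) size expression:

#include <stddef.h>
#include <stdint.h>

struct tfd_entry { uint16_t q_num; uint16_t tfd_index; };

struct ba_notif {
        uint16_t sta_id;
        uint16_t tfd_cnt;
        struct tfd_entry tfd[];         /* variable-length tail */
};

static int ba_notif_is_sane(const struct ba_notif *n, size_t pkt_len)
{
        if (pkt_len < sizeof(*n))       /* fixed header must fit */
                return 0;
        if (!n->tfd_cnt)                /* nothing to reclaim */
                return 0;
        /* the claimed array must also fit in the payload */
        return sizeof(*n) +
               (size_t)n->tfd_cnt * sizeof(n->tfd[0]) <= pkt_len;
}

int main(void)
{
        uint16_t buf[32] = { 0 };
        struct ba_notif *n = (struct ba_notif *)buf;

        n->tfd_cnt = 4;
        return ba_notif_is_sane(n, sizeof(buf)) ? 0 : 1;  /* 0: fits */
}
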
@@ -1881,7 +1918,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
*/
/* Free per TID */
- for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+ for (i = 0; i < tfd_cnt; i++) {
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
@@ -1896,14 +1933,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
- le32_to_cpu(ba_res->tx_rate));
+ le32_to_cpu(ba_res->tx_rate), false);
}
if (mvmsta)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-out:
+
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
@@ -1939,7 +1976,7 @@ out:
rcu_read_unlock();
iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags);
+ tid_data->rate_n_flags, false);
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
@@ -1963,7 +2000,7 @@ out:
* 2) flush the Tx path
* 3) wait for the transport queues to be empty
*/
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
{
int ret;
struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
@@ -1972,29 +2009,89 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
};
WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
- ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
sizeof(flush_cmd), &flush_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
return ret;
}
-int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
- u16 tids, u32 flags)
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
{
int ret;
+ struct iwl_tx_path_flush_cmd_rsp *rsp;
struct iwl_tx_path_flush_cmd flush_cmd = {
.sta_id = cpu_to_le32(sta_id),
.tid_mask = cpu_to_le16(tids),
};
+ struct iwl_host_cmd cmd = {
+ .id = TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ };
+
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
- ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
- sizeof(flush_cmd), &flush_cmd);
- if (ret)
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+ cmd.flags |= CMD_WANT_SKB;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ sta_id, tids);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+ }
+
+ if (cmd.flags & CMD_WANT_SKB) {
+ int i;
+ int num_flushed_queues;
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ rsp = (void *)cmd.resp_pkt->data;
+
+ if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
+ "sta_id %d != rsp_sta_id %d",
+ sta_id, le16_to_cpu(rsp->sta_id))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+ if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+ "num_flushed_queues %d", num_flushed_queues)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ for (i = 0; i < num_flushed_queues; i++) {
+ struct ieee80211_tx_info tx_info = {};
+ struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+ int tid = le16_to_cpu(queue_info->tid);
+ int read_before = le16_to_cpu(queue_info->read_before_flush);
+ int read_after = le16_to_cpu(queue_info->read_after_flush);
+ int queue_num = le16_to_cpu(queue_info->queue_num);
+
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "tid %d queue_id %d read-before %d read-after %d\n",
+ tid, queue_num, read_before, read_after);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
+ &tx_info, 0, true);
+ }
+free_rsp:
+ iwl_free_resp(&cmd);
+ }
return ret;
}
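
When the firmware advertises a versioned TXPATH_FLUSH response (iwl_fw_lookup_notif_ver() > 0), CMD_WANT_SKB keeps the response packet so the driver can walk rsp->queues[] and reclaim each flushed queue up to read_after_flush; every field is sanity-checked before use, and the packet is freed on all paths. A userspace sketch of that validate-then-iterate shape, with illustrative types and bounds:

#include <stdint.h>
#include <stdio.h>

#define MAX_FLUSHED_QUEUES 16           /* stand-in for IWL_TX_FLUSH_QUEUE_RSP */

struct queue_info {
        uint16_t tid;
        uint16_t queue_num;
        uint16_t read_before;
        uint16_t read_after;
};

struct flush_rsp {
        uint16_t sta_id;
        uint16_t num_flushed_queues;
        struct queue_info queues[MAX_FLUSHED_QUEUES];
};

/* returns 0 on success, -1 if the response fails validation */
static int handle_flush_rsp(const struct flush_rsp *rsp, uint16_t want_sta)
{
        int i;

        if (rsp->sta_id != want_sta)    /* response for the wrong station */
                return -1;
        if (rsp->num_flushed_queues > MAX_FLUSHED_QUEUES)
                return -1;              /* firmware claimed too many queues */

        for (i = 0; i < rsp->num_flushed_queues; i++)
                printf("queue %u: reclaim frames up to index %u\n",
                       rsp->queues[i].queue_num, rsp->queues[i].read_after);

        return 0;
}

int main(void)
{
        struct flush_rsp rsp = {
                .sta_id = 3,
                .num_flushed_queues = 1,
                .queues = { { .queue_num = 5, .read_after = 42 } },
        };

        return handle_flush_rsp(&rsp, 3);
}
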
@@ -2007,10 +2104,10 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
offsetof(struct iwl_mvm_sta, sta_id));
if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff);
if (internal)
- return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk);
- return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk);
}