path: root/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 369
1 file changed, 360 insertions(+), 9 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 9a54f2d2a66b..ac2c5718e454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
else
- ieee80211_rx_napi(mvm->hw, skb, napi);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
if (mvmvif->features & NETIF_F_RXCSUM &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
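
One subtlety worth spelling out: IPv6 has no IP header checksum, so the firmware can never set IWL_RX_L3L4_IP_HDR_CSUM_OK for IPv6 frames, and the L3 protocol type is consulted instead. Rewritten as a standalone predicate (a sketch only, using the names from this patch; the bit layout is whatever the firmware API headers define):

/* Sketch only - mirrors the condition above, not part of the patch */
static bool iwl_mvm_l3l4_csum_ok(u16 flags)
{
	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
			  IWL_RX_L3_PROTO_POS);

	if (!(flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK))
		return false;

	/* IPv6 carries no IP header checksum to validate */
	return (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK) ||
	       l3_prot == IWL_RX_L3_TYPE_IPV6 ||
	       l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG;
}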
@@ -390,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
return ret;
}
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only to compare the reorder buffer head with the NSSN.
+ * We fully trust the NSSN unless it is behind us due to a reorder timeout.
+ * A reorder timeout can only bring us up to buffer_size SNs ahead of the NSSN.
+ */
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+ return ieee80211_sn_less(sn1, sn2) &&
+ !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
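
Since 802.11 sequence numbers are 12 bits, both comparisons are modular. A self-contained userspace sketch, reimplementing mac80211's ieee80211_sn_less() purely for illustration, shows the wrap-around cases this helper has to get right:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SN_MODULO 0x1000	/* 802.11 SNs are 12 bits */
#define SN_MASK   (SN_MODULO - 1)

/* mac80211's ieee80211_sn_less(), reimplemented for this sketch */
static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

static bool is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buf_size)
{
	return sn_less(sn1, sn2) && !sn_less(sn1, sn2 - buf_size);
}

int main(void)
{
	/* head_sn just before the wrap, NSSN just after it: 4090 lies
	 * inside (30 - 64, 30) mod 4096, so the head may advance. */
	printf("%d\n", is_sn_less(4090, 30, 64));	/* prints 1 */

	/* NSSN behind the head (possible after a reorder timeout):
	 * iwl_mvm_release_frames() sees is_sn_less(nssn, head_sn, ...)
	 * come out true and ignores the stale NSSN. */
	printf("%d\n", is_sn_less(90, 100, 64));	/* prints 1 */
	return 0;
}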
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mvm_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ u16 ssn = reorder_buf->head_sn;
+
+ lockdep_assert_held(&reorder_buf->lock);
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ return;
+
+ while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /* holes are valid since nssn indicates frames were received. */
+ if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
+ continue;
+ /* Empty the list. Will have more than one frame for A-MSDU */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+ while (!skb_peek_tail(&reorder_buf->entries[index]))
+ index = (index + 1) % reorder_buf->buf_size;
+ /* modify timer to match next frame's expiration time */
+ mod_timer(&reorder_buf->reorder_timer,
+ reorder_buf->reorder_time[index] + 1 +
+ RX_REORDER_BUF_TIMEOUT_MQ);
+ } else {
+ del_timer(&reorder_buf->reorder_timer);
+ }
+}
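
A concrete trace helps here. Assume buf_size = 8, head_sn = 100, frames 100 and 102 stored, 101 missing; an NSSN of 103 flushes slot 4 (100 % 8), skips slot 5 as a legitimate hole, flushes slot 6, and leaves head_sn at 103. A userspace sketch of just that loop (12-bit wrap elided for brevity):

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE 8	/* illustrative; real sessions use the BA window */

int main(void)
{
	/* entries[i] = number of frames stored in slot i (an A-MSDU
	 * stores several skbs under a single SN) */
	int entries[BUF_SIZE] = { 0 };
	uint16_t head_sn = 100, nssn = 103, ssn;

	entries[100 % BUF_SIZE] = 1;	/* SN 100 */
	/* SN 101 never arrived: a hole */
	entries[102 % BUF_SIZE] = 3;	/* SN 102, a 3-subframe A-MSDU */

	for (ssn = head_sn; ssn != nssn; ssn++) {
		int idx = ssn % BUF_SIZE;

		if (!entries[idx]) {
			/* hole: the NSSN proves we may move past it */
			printf("SN %d: hole, skipped\n", ssn);
			continue;
		}
		printf("SN %d: released %d frame(s)\n", ssn, entries[idx]);
		entries[idx] = 0;
	}
	printf("new head_sn = %d\n", nssn);
	return 0;
}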
+
+void iwl_mvm_reorder_timer_expired(unsigned long data)
+{
+ struct iwl_mvm_reorder_buffer *buf = (void *)data;
+ int i;
+ u16 sn = 0, index = 0;
+ bool expired = false;
+
+ spin_lock_bh(&buf->lock);
+
+ if (!buf->num_stored || buf->removed) {
+ spin_unlock_bh(&buf->lock);
+ return;
+ }
+
+ for (i = 0; i < buf->buf_size; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (!skb_peek_tail(&buf->entries[index]))
+ continue;
+ if (!time_after(jiffies, buf->reorder_time[index] +
+ RX_REORDER_BUF_TIMEOUT_MQ))
+ break;
+ expired = true;
+ sn = ieee80211_sn_add(buf->head_sn, i + 1);
+ }
+
+ if (expired) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+ /* SN is set to the last expired frame + 1 */
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+ rcu_read_unlock();
+ } else if (buf->num_stored) {
+ /*
+ * If no frame expired and there are stored frames, index is now
+ * pointing to the first unexpired frame - modify the timer to
+ * match this frame's expiration time.
+ */
+ mod_timer(&buf->reorder_timer,
+ buf->reorder_time[index] +
+ 1 + RX_REORDER_BUF_TIMEOUT_MQ);
+ }
+ spin_unlock_bh(&buf->lock);
+}
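
The unsigned long argument is the classic pre-timer_setup() timer convention, so each per-queue buffer presumably arms this callback along the following lines when the BA session is created. The actual registration lives in the station management code, not in this file; this fragment is for context only:

/* Hypothetical hookup, for illustration only */
setup_timer(&reorder_buf->reorder_timer,
	    iwl_mvm_reorder_timer_expired,
	    (unsigned long)reorder_buf);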
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+ struct iwl_mvm_delba_data *data)
+{
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ u8 baid = data->baid;
+
+ if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ reorder_buf->buf_size));
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue)
{
@@ -400,15 +549,182 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
+ if (internal_notif->sync) {
+ if (mvm->queue_sync_cookie != internal_notif->cookie) {
+ WARN_ONCE(1,
+ "Received expired RX queue sync message\n");
+ return;
+ }
+ atomic_dec(&mvm->queue_sync_counter);
+ }
+
switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
+ break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
- /* TODO */
+ iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
}
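
The cookie/counter pair implies a sender that arms queue_sync_counter with the number of RX queues, broadcasts the notification, and waits for each queue's handler to decrement the counter; a handler that runs after the sender gave up sees a stale cookie and triggers the warning above. A hedged sketch of such a sender (the real one lives elsewhere in the driver; the waitqueue name here is hypothetical):

static void iwl_mvm_sync_rx_queues_sketch(struct iwl_mvm *mvm)
{
	/* one decrement expected per RX queue */
	atomic_set(&mvm->queue_sync_counter,
		   mvm->trans->num_rx_queues);

	/* ... send the internal IWL_MVM_RXQ_* notification here ... */

	/* wait for every queue to ack; handlers arriving later will
	 * see a stale cookie and warn, as in the code above */
	wait_event_timeout(mvm->rx_sync_waitq,
			   atomic_read(&mvm->queue_sync_counter) == 0,
			   HZ);
	mvm->queue_sync_cookie++;
}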
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be passed
+ * to upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ int queue,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_baid_data *baid_data;
+ struct iwl_mvm_reorder_buffer *buffer;
+ struct sk_buff *tail;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ int index;
+ u16 nssn, sn;
+ u8 baid;
+
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ /* no sta yet */
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ /* not a data packet */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN(!baid_data,
+ "Received baid %d, but no data exists for this BAID\n", baid))
+ return false;
+ if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+ "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+ baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+ tid))
+ return false;
+
+ nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &baid_data->reorder_buf[queue];
+
+ spin_lock_bh(&buffer->lock);
+
+ /*
+ * If there was a significant jump in the nssn - adjust.
+ * If the SN is smaller than the NSSN it might need to first go into
+ * the reorder buffer, in which case we just release up to it and the
+ * rest of the function will take care of storing it and releasing up to
+ * the NSSN.
+ */
+ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size)) {
+ u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+ }
+
+ /* drop any outdated packets */
+ if (ieee80211_sn_less(sn, buffer->head_sn))
+ goto drop;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+ buffer->buf_size))
+ buffer->head_sn = nssn;
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+ * Check whether we already stored this frame.
+ * As an A-MSDU is either received in its entirety or not at all, the
+ * logic is simple: if we have frames in that position in the buffer
+ * and the last frame originating from an A-MSDU had a different SN,
+ * then it is a retransmission. If it is the same SN, an incrementing
+ * subframe index means the same A-MSDU - otherwise a retransmission.
+ */
+ tail = skb_peek_tail(&buffer->entries[index]);
+ if (tail && !amsdu)
+ goto drop;
+ else if (tail && (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= sub_frame_idx))
+ goto drop;
+
+ /* put in reorder buffer */
+ __skb_queue_tail(&buffer->entries[index], skb);
+ buffer->num_stored++;
+ buffer->reorder_time[index] = jiffies;
+
+ if (amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = sub_frame_idx;
+ }
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+
+drop:
+ kfree_skb(skb);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+}
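
The A-MSDU duplicate check reads most easily with concrete numbers. Say the tail frame stored in this slot set last_amsdu = 10 and last_sub_index = 1; an incoming A-MSDU subframe that maps to the same slot is then classified as:

/* sn = 10, sub_frame_idx = 2 -> same A-MSDU, index advanced -> store
 * sn = 10, sub_frame_idx = 1 -> index did not advance -> drop (retx)
 * sn = 10, sub_frame_idx = 0 -> index went backwards  -> drop (retx)
 * sn != 10                   -> different A-MSDU      -> drop (retx)
 */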
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mvm_baid_data *data;
+
+ rcu_read_lock();
+
+ data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON(!data))
+ goto out;
+
+ if (!data->timeout)
+ goto out;
+
+ timeout = data->timeout;
+ /*
+ * Do not update last_rx on every frame, to avoid cache bouncing
+ * between the rx queues.
+ * Update it once per timeout period. Worst case, the session will
+ * expire after ~ 2 * timeout, which doesn't matter that much.
+ */
+ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+ /* Update is atomic */
+ data->last_rx = now;
+
+out:
+ rcu_read_unlock();
+}
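
For a feel of the numbers: BA session timeouts are negotiated in time units (1 TU = 1024 microseconds), so with an illustrative timeout of 5000 TU and HZ = 250:

/* timeout = 5000 TU x 1024 us   -> ~5.12 s
 * TU_TO_JIFFIES(5000), HZ = 250 -> ~1280 jiffies
 *
 * last_rx is rewritten at most once per ~5.12 s window, so the
 * session-expiry check sees it go stale somewhere between
 * timeout and ~2 * timeout after the last frame actually arrived.
 */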
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -451,8 +767,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
- IEEE80211_BAND_2GHZ;
+ rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, desc, rx_status);
@@ -479,6 +795,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT);
/*
* We have tx blocked stations (with CS bit). If we heard
@@ -531,6 +850,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ iwl_mvm_agg_rx_received(mvm, baid);
}
/*
@@ -588,12 +909,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* TODO: PHY info - gscan */
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
rcu_read_unlock();
}
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
- /* TODO */
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_frame_release *release = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+
+ int baid = release->baid;
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+ le16_to_cpu(release->nssn));
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
}