Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c')
-rw-r--r--   drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c   339
1 file changed, 41 insertions(+), 298 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 8d1e44fd9d..af15d470c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -376,8 +376,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
(status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
- IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
+ IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) {
+ IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n");
return -1;
+ }
if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_has_protected(hdr->frame_control)))
@@ -503,6 +505,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
dup_data = &mvm_sta->dup_data[queue];
/*
@@ -548,44 +554,12 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
}
-/*
- * Returns true if sn2 - buffer_size < sn1 < sn2.
- * To be used only in order to compare reorder buffer head with NSSN.
- * We fully trust NSSN unless it is behind us due to reorder timeout.
- * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
- */
-static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
-{
- return ieee80211_sn_less(sn1, sn2) &&
- !ieee80211_sn_less(sn1, sn2 - buffer_size);
-}
-
-static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
-{
- if (IWL_MVM_USE_NSSN_SYNC) {
- struct iwl_mvm_nssn_sync_data notif = {
- .baid = baid,
- .nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false,
- &notif, sizeof(notif));
- }
-}
-
-#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
-
-enum iwl_mvm_release_flags {
- IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
- IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
-};
-
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
- u16 nssn, u32 flags)
+ u16 nssn)
{
struct iwl_mvm_reorder_buf_entry *entries =
&baid_data->entries[reorder_buf->queue *
@@ -594,31 +568,12 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
- /*
- * We keep the NSSN not too far behind, if we are sync'ing it and it
- * is more than 2048 ahead of us, it must be behind us. Discard it.
- * This can happen if the queue that hit the 0 / 2048 seqno was lagging
- * behind and this queue already processed packets. The next if
- * would have caught cases where this queue would have processed less
- * than 64 packets, but it may have processed more than 64 packets.
- */
- if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
- ieee80211_sn_less(nssn, ssn))
- goto set_timer;
-
- /* ignore nssn smaller than head sn - this can happen due to timeout */
- if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
- goto set_timer;
-
- while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ while (ieee80211_sn_less(ssn, nssn)) {
int index = ssn % reorder_buf->buf_size;
- struct sk_buff_head *skb_list = &entries[index].e.frames;
+ struct sk_buff_head *skb_list = &entries[index].frames;
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
- if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
- (ssn == 2048 || ssn == 0))
- iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
/*
* Empty the list. Will have more than one frame for A-MSDU.
@@ -633,99 +588,6 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
}
}
reorder_buf->head_sn = nssn;
-
-set_timer:
- if (reorder_buf->num_stored && !reorder_buf->removed) {
- u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
-
- while (skb_queue_empty(&entries[index].e.frames))
- index = (index + 1) % reorder_buf->buf_size;
- /* modify timer to match next frame's expiration time */
- mod_timer(&reorder_buf->reorder_timer,
- entries[index].e.reorder_time + 1 +
- RX_REORDER_BUF_TIMEOUT_MQ);
- } else {
- del_timer(&reorder_buf->reorder_timer);
- }
-}
-
-void iwl_mvm_reorder_timer_expired(struct timer_list *t)
-{
- struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
- struct iwl_mvm_baid_data *baid_data =
- iwl_mvm_baid_data_from_reorder_buf(buf);
- struct iwl_mvm_reorder_buf_entry *entries =
- &baid_data->entries[buf->queue * baid_data->entries_per_queue];
- int i;
- u16 sn = 0, index = 0;
- bool expired = false;
- bool cont = false;
-
- spin_lock(&buf->lock);
-
- if (!buf->num_stored || buf->removed) {
- spin_unlock(&buf->lock);
- return;
- }
-
- for (i = 0; i < buf->buf_size ; i++) {
- index = (buf->head_sn + i) % buf->buf_size;
-
- if (skb_queue_empty(&entries[index].e.frames)) {
- /*
- * If there is a hole and the next frame didn't expire
- * we want to break and not advance SN
- */
- cont = false;
- continue;
- }
- if (!cont &&
- !time_after(jiffies, entries[index].e.reorder_time +
- RX_REORDER_BUF_TIMEOUT_MQ))
- break;
-
- expired = true;
- /* continue until next hole after this expired frames */
- cont = true;
- sn = ieee80211_sn_add(buf->head_sn, i + 1);
- }
-
- if (expired) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id = ffs(baid_data->sta_mask) - 1;
-
- rcu_read_lock();
- sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- rcu_read_unlock();
- goto out;
- }
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- /* SN is set to the last expired frame + 1 */
- IWL_DEBUG_HT(buf->mvm,
- "Releasing expired frames for sta %u, sn %d\n",
- sta_id, sn);
- iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
- sta, baid_data->tid);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
- buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
- rcu_read_unlock();
- } else {
- /*
- * If no frame expired and there are stored frames, index is now
- * pointing to the first unexpired frame - modify timer
- * accordingly to this frame.
- */
- mod_timer(&buf->reorder_timer,
- entries[index].e.reorder_time +
- 1 + RX_REORDER_BUF_TIMEOUT_MQ);
- }
-
-out:
- spin_unlock(&buf->lock);
}
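
The simplified release loop above leans entirely on mac80211's modulo-4096 sequence-number helpers. As a rough, self-contained model of that arithmetic (plain C, not the driver's headers; the names below are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define SN_MASK   0x0fff		/* 802.11 sequence numbers are 12 bits wide */
#define SN_MODULO (SN_MASK + 1)

/* roughly what ieee80211_sn_less() does: "sn1 comes before sn2" mod 4096 */
static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

/* roughly what ieee80211_sn_inc() does */
static uint16_t sn_inc(uint16_t sn)
{
	return (sn + 1) & SN_MASK;
}

/*
 * Example: with head_sn == 4094 and nssn == 2, sn_less(4094, 2) is true,
 * so the loop releases slots 4094, 4095, 0 and 1, then stops at 2.
 */
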
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -758,10 +620,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size),
- 0);
+ reorder_buf->buf_size));
spin_unlock_bh(&reorder_buf->lock);
- del_timer_sync(&reorder_buf->reorder_timer);
out:
rcu_read_unlock();
@@ -769,8 +629,7 @@ out:
static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
struct napi_struct *napi,
- u8 baid, u16 nssn, int queue,
- u32 flags)
+ u8 baid, u16 nssn, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
@@ -788,8 +647,7 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (!ba_data) {
- WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC),
- "BAID %d not found in map\n", baid);
+ WARN(true, "BAID %d not found in map\n", baid);
goto out;
}
@@ -803,22 +661,13 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, napi, ba_data,
- reorder_buf, nssn, flags);
+ reorder_buf, nssn);
spin_unlock_bh(&reorder_buf->lock);
out:
rcu_read_unlock();
}
-static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
- struct napi_struct *napi, int queue,
- const struct iwl_mvm_nssn_sync_data *data)
-{
- iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
- data->nssn, queue,
- IWL_MVM_RELEASE_FROM_RSS_SYNC);
-}
-
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -853,14 +702,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
- case IWL_MVM_RXQ_NSSN_SYNC:
- if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data),
- "invalid nssn sync notification size %d (%d)",
- len, (int)sizeof(struct iwl_mvm_nssn_sync_data)))
- break;
- iwl_mvm_nssn_sync(mvm, napi, queue,
- (void *)internal_notif->data);
- break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
@@ -874,55 +715,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
-static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, int tid,
- struct iwl_mvm_reorder_buffer *buffer,
- u32 reorder, u32 gp2, int queue)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
- /* we have a new (A-)MPDU ... */
-
- /*
- * reset counter to 0 if we didn't have any oldsn in
- * the last A-MPDU (as detected by GP2 being identical)
- */
- if (!buffer->consec_oldsn_prev_drop)
- buffer->consec_oldsn_drops = 0;
-
- /* either way, update our tracking state */
- buffer->consec_oldsn_ampdu_gp2 = gp2;
- } else if (buffer->consec_oldsn_prev_drop) {
- /*
- * tracking state didn't change, and we had an old SN
- * indication before - do nothing in this case, we
- * already noted this one down and are waiting for the
- * next A-MPDU (by GP2)
- */
- return;
- }
-
- /* return unless this MPDU has old SN */
- if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN))
- return;
-
- /* update state */
- buffer->consec_oldsn_prev_drop = 1;
- buffer->consec_oldsn_drops++;
-
- /* if limit is reached, send del BA and reset state */
- if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) {
- IWL_WARN(mvm,
- "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n",
- IWL_MVM_AMPDU_CONSEC_DROPS_DELBA,
- sta->addr, queue, tid);
- ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr);
- buffer->consec_oldsn_prev_drop = 0;
- buffer->consec_oldsn_drops = 0;
- }
-}
-
/*
* Returns true if the MPDU was buffered\dropped, false if it should be passed
* to upper layer.
@@ -934,11 +726,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc)
{
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
- struct sk_buff *tail;
u32 reorder = le32_to_cpu(desc->reorder_data);
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
bool last_subframe =
@@ -955,6 +745,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
+ return false;
+
/*
* This also covers the case of receiving a Block Ack Request
* outside a BA session; we'll pass it to mac80211 and that
@@ -1016,59 +809,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
buffer->valid = true;
}
- if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, baid_data,
- buffer, nssn, 0);
+ /* drop any duplicated packets */
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE))
goto drop;
- }
-
- /*
- * If there was a significant jump in the nssn - adjust.
- * If the SN is smaller than the NSSN it might need to first go into
- * the reorder buffer, in which case we just release up to it and the
- * rest of the function will take care of storing it and releasing up to
- * the nssn.
- * This should not happen. This queue has been lagging and it should
- * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
- * and update the other queues.
- */
- if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
- buffer->buf_size) ||
- !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
- u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
-
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
- min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
- }
-
- iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder,
- rx_status->device_timestamp, queue);
/* drop any oudated packets */
- if (ieee80211_sn_less(sn, buffer->head_sn))
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)
goto drop;
/* release immediately if allowed by nssn and no stored frames */
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
- if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
- buffer->buf_size) &&
- (!amsdu || last_subframe)) {
- /*
- * If we crossed the 2048 or 0 SN, notify all the
- * queues. This is done in order to avoid having a
- * head_sn that lags behind for too long. When that
- * happens, we can get to a situation where the head_sn
- * is within the interval [nssn - buf_size : nssn]
- * which will make us think that the nssn is a packet
- * that we already freed because of the reordering
- * buffer and we will ignore it. So maintain the
- * head_sn somewhat updated across all the queues:
- * when it crosses 0 and 2048.
- */
- if (sn == 2048 || sn == 0)
- iwl_mvm_sync_nssn(mvm, baid, sn);
+ if (!amsdu || last_subframe)
buffer->head_sn = nssn;
- }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -1083,37 +835,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* while technically there is no hole and we can move forward.
*/
if (!buffer->num_stored && sn == buffer->head_sn) {
- if (!amsdu || last_subframe) {
- if (sn == 2048 || sn == 0)
- iwl_mvm_sync_nssn(mvm, baid, sn);
+ if (!amsdu || last_subframe)
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
- }
+
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
}
- index = sn % buffer->buf_size;
-
- /*
- * Check if we already stored this frame
- * As AMSDU is either received or not as whole, logic is simple:
- * If we have frames in that position in the buffer and the last frame
- * originated from AMSDU had a different SN then it is a retransmission.
- * If it is the same SN then if the subframe index is incrementing it
- * is the same AMSDU - otherwise it is a retransmission.
- */
- tail = skb_peek_tail(&entries[index].e.frames);
- if (tail && !amsdu)
- goto drop;
- else if (tail && (sn != buffer->last_amsdu ||
- buffer->last_sub_index >= sub_frame_idx))
- goto drop;
-
/* put in reorder buffer */
- __skb_queue_tail(&entries[index].e.frames, skb);
+ index = sn % buffer->buf_size;
+ __skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
- entries[index].e.reorder_time = jiffies;
if (amsdu) {
buffer->last_amsdu = sn;
@@ -1133,8 +866,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
*/
if (!amsdu || last_subframe)
iwl_mvm_release_frames(mvm, sta, napi, baid_data,
- buffer, nssn,
- IWL_MVM_RELEASE_SEND_RSS_SYNC);
+ buffer, nssn);
spin_unlock_bh(&buffer->lock);
return true;
@@ -1507,7 +1239,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
#define IWL_RX_RU_DATA_A1 2
#define IWL_RX_RU_DATA_A2 2
#define IWL_RX_RU_DATA_B1 2
-#define IWL_RX_RU_DATA_B2 3
+#define IWL_RX_RU_DATA_B2 4
#define IWL_RX_RU_DATA_C1 3
#define IWL_RX_RU_DATA_C2 3
#define IWL_RX_RU_DATA_D1 4
@@ -2562,6 +2294,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_rx_csum(mvm, sta, skb, pkt);
if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
+ IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n",
+ le16_to_cpu(hdr->seq_ctrl));
kfree_skb(skb);
goto out;
}
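
The new drop message above logs the raw 16-bit sequence-control field in hex. If it helps when reading such logs: the standard IEEE 802.11 packing keeps the fragment number in the low four bits and the 12-bit sequence number above it, so it can be unpacked with something like the sketch below (helper names are made up; the IEEE80211_SCTL_* masks are the usual ieee80211.h ones):

#include <linux/ieee80211.h>

/* illustrative only: split a logged seq_ctrl value into SN and fragment */
static u16 log_seq_to_sn(u16 seq_ctrl)
{
	return (seq_ctrl & IEEE80211_SCTL_SEQ) >> 4;	/* 12-bit sequence number */
}

static u8 log_seq_to_frag(u16 seq_ctrl)
{
	return seq_ctrl & IEEE80211_SCTL_FRAG;		/* fragment number, 0..15 */
}

/* e.g. a logged value of 0x1230 is SN 0x123 (291), fragment 0 */
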
@@ -2613,9 +2347,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
- likely(!iwl_mvm_mei_filter_scan(mvm, skb)))
+ likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
link_sta);
+ }
out:
rcu_read_unlock();
}
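
On the 9000-family path added above, A-MSDU subframes arrive as individual MPDUs that all share one sequence number, and mac80211 is told that more subframes of the same A-MSDU follow by setting RX_FLAG_AMSDU_MORE on everything but the last one. A minimal sketch of that check pulled out as a helper (the helper name is invented, not part of the patch; the flag and descriptor fields are the ones used in the hunk):

/* illustrative helper, not part of the patch */
static void mark_amsdu_more(struct ieee80211_rx_status *rx_status,
			    const struct iwl_rx_mpdu_desc *desc)
{
	/* A-MSDU subframe that is not the last one of its A-MSDU? */
	if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
	    !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
		rx_status->flag |= RX_FLAG_AMSDU_MORE;
}
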
@@ -2758,7 +2498,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
le16_to_cpu(release->nssn),
- queue, 0);
+ queue);
}
void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
@@ -2799,7 +2539,10 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
tid))
goto out;
- iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0);
+ IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n",
+ nssn);
+
+ iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue);
out:
rcu_read_unlock();
}