Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/Makefile        13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/binding.c      226
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/coex.c         735
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/constants.h    154
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c          2164
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c 1573
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c     2067
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h      103
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h       100
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c          1306
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/led.c          178
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c    1618
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c    4730
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h         2002
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c          646
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/offloading.c   255
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c         1732
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c     326
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/power.c       1066
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/quota.c        359
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c        374
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c          4163
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h           469
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c           919
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c        1518
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c        2031
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sf.c           336
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c         3667
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h          579
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tdls.c         739
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/testmode.h      97
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c   921
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.h   250
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tof.c          310
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tof.h           94
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c           909
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c          2006
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c       1878
38 files changed, 42613 insertions, 0 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
new file mode 100644
index 000000000..9ffd21918
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IWLMVM) += iwlmvm.o
+iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
+iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
+iwlmvm-y += power.o coex.o
+iwlmvm-y += tt.o offloading.o tdls.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
+iwlmvm-$(CONFIG_PM) += d3.o
+
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
new file mode 100644
index 000000000..75d35f6b0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -0,0 +1,226 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
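+/*
+ * Accumulator used when iterating over the active interfaces: collects
+ * the MAC context ids and colors of every vif (other than @ignore_vif)
+ * that is bound to @phyctxt; @idx counts how many were found.
+ */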
+struct iwl_mvm_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ int idx;
+
+ struct iwl_mvm_phy_ctxt *phyctxt;
+
+ u16 ids[MAX_MACS_IN_BINDING];
+ u16 colors[MAX_MACS_IN_BINDING];
+};
+
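+/*
+ * Build and send a BINDING_CONTEXT_CMD for the given PHY context. When
+ * the firmware advertises CDB binding support, the full command
+ * (including lmac_id) is sent; otherwise only the V1 subset. Unused MAC
+ * slots are padded with FW_CTXT_INVALID. Returns 0 on success and -EIO
+ * if the firmware rejected the command.
+ */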
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+ struct iwl_mvm_iface_iterator_data *data)
+{
+ struct iwl_binding_cmd cmd;
+ struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+ int i, ret;
+ u32 status;
+ int size;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(cmd);
+ if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+ cmd.action = cpu_to_le32(action);
+ cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+
+ for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+ for (i = 0; i < data->idx; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+ data->colors[i]));
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ size, &cmd, &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+ action, ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif)
+ return;
+
+ if (mvmvif->phy_ctxt != data->phyctxt)
+ return;
+
+ if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+ return;
+
+ data->ids[data->idx] = mvmvif->id;
+ data->colors[data->idx] = mvmvif->color;
+ data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_phy_ctxt *phyctxt,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_iface_iterator_data data = {
+ .ignore_vif = vif,
+ .phyctxt = phyctxt,
+ };
+ u32 action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_iface_iterator,
+ &data);
+
+ /*
+ * If no other interface is bound to this PHY context, we are
+ * either creating a brand new binding or removing its last
+ * user, so switch from MODIFY to ADD or REMOVE accordingly.
+ */
+ if (data.idx == 0) {
+ if (add)
+ action = FW_CTXT_ACTION_ADD;
+ else
+ action = FW_CTXT_ACTION_REMOVE;
+ }
+
+ if (add) {
+ if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+ return -EINVAL;
+
+ data.ids[data.idx] = mvmvif->id;
+ data.colors[data.idx] = mvmvif->color;
+ data.idx++;
+ }
+
+ return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ /*
+ * Update SF - Disable if needed. If this fails, SF might still be on
+ * while many macs are bound, which is forbidden - so fail the binding.
+ */
+ if (iwl_mvm_sf_update(mvm, vif, false))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+
+ if (!ret)
+ if (iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
new file mode 100644
index 000000000..016e03a50
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -0,0 +1,735 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "fw/api/coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
+/*
+ * Per-channel BT coex channel-inhibition masks, indexed by 2.4 GHz
+ * channel number: 20MHz / 40MHz below / 40MHz above.
+ */
+static const __le64 iwl_ci_mask[][3] = {
+ /* dummy entry for channel 0 */
+ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+ {
+ cpu_to_le64(0x0000001FFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000000FFFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000003FFFCULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ },
+ {
+ cpu_to_le64(0x00001FFFE0ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ },
+ {
+ cpu_to_le64(0x00007FFF80ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ },
+ {
+ cpu_to_le64(0x0003FFFC00ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ },
+ {
+ cpu_to_le64(0x000FFFF000ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ },
+ {
+ cpu_to_le64(0x007FFF8000ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ },
+ {
+ cpu_to_le64(0x01FFFE0000ULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ },
+ {
+ cpu_to_le64(0x0FFFF00000ULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ cpu_to_le64(0x0ULL),
+ },
+ {
+ cpu_to_le64(0x3FFFC00000ULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFFE000000ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFF8000000ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0ULL)
+ },
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum iwl_bt_coex_lut_type ret;
+ u16 phy_ctx_id;
+ u32 primary_ch_phy_id, secondary_ch_phy_id;
+
+ /*
+ * Checking that we hold mvm->mutex is a good idea, but the rate
+ * control can't acquire the mutex since it runs in the Tx path.
+ * So this is racy in that case, but in the worst case, the AMPDU
+ * size limit will be wrong for a short time which is not a big
+ * issue.
+ */
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return BT_COEX_INVALID_LUT;
+ }
+
+ ret = BT_COEX_TX_DIS_LUT;
+
+ if (mvm->cfg->bt_shared_single_ant) {
+ rcu_read_unlock();
+ return ret;
+ }
+
+ phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+ primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
+ secondary_ch_phy_id =
+ le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
+
+ if (primary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+ else if (secondary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+ /* else - default stays Tx/Tx disallowed (BT_COEX_TX_DIS_LUT) */
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
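+/*
+ * Send the initial BT_CONFIG command: if an antenna force mode is set,
+ * coex is forced to BT-only or WiFi-only; otherwise NW coex is enabled
+ * (unless bt_coex_active is unset) together with the SYNC2SCO, MPLUT
+ * and high-band-retention modules where supported.
+ */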
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_bt_coex_cmd bt_cmd = {};
+ u32 mode;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+ switch (mvm->bt_force_ant_mode) {
+ case BT_FORCE_ANT_BT:
+ mode = BT_COEX_BT;
+ break;
+ case BT_FORCE_ANT_WIFI:
+ mode = BT_COEX_WIFI;
+ break;
+ default:
+ WARN_ON(1);
+ mode = 0;
+ }
+
+ bt_cmd.mode = cpu_to_le32(mode);
+ goto send_cmd;
+ }
+
+ mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
+ bt_cmd.mode = cpu_to_le32(mode);
+
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd.enabled_modules |=
+ cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
+
+ if (iwl_mvm_is_mplut_supported(mvm))
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+
+send_cmd:
+ memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
+}
+
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+ bool enable)
+{
+ struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
+ struct iwl_mvm_sta *mvmsta;
+ u32 value;
+ int ret;
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
+ return 0;
+
+ /* nothing to do */
+ if (mvmsta->bt_reduced_txpower == enable)
+ return 0;
+
+ value = mvmsta->sta_id;
+
+ if (enable)
+ value |= BT_REDUCED_TX_POWER_BIT;
+
+ IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
+ enable ? "en" : "dis", sta_id);
+
+ cmd.reduced_txp = cpu_to_le32(value);
+ mvmsta->bt_reduced_txpower = enable;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+
+ return ret;
+}
+
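+/*
+ * State gathered by iwl_mvm_bt_notif_iterator: the last coex
+ * notification, the primary/secondary 2.4 GHz channel contexts with
+ * their traffic loads, and whether the primary belongs to a
+ * low-latency vif.
+ */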
+struct iwl_bt_iterator_data {
+ struct iwl_bt_coex_profile_notif *notif;
+ struct iwl_mvm *mvm;
+ struct ieee80211_chanctx_conf *primary;
+ struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
+ u8 primary_load;
+ u8 secondary_load;
+};
+
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, int rssi)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->bf_data.last_bt_coex_event = rssi;
+ mvmvif->bf_data.bt_coex_max_thold =
+ enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
+ mvmvif->bf_data.bt_coex_min_thold =
+ enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
+}
+
+#define MVM_COEX_TCM_PERIOD (HZ * 10)
+
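+/*
+ * Re-evaluate, at most once per MVM_COEX_TCM_PERIOD, which channel
+ * context should be primary based on traffic load: a low-latency
+ * primary always stays primary, otherwise the busier context wins.
+ */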
+static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
+ struct iwl_bt_iterator_data *data)
+{
+ unsigned long now = jiffies;
+
+ if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
+ return;
+
+ mvm->bt_coex_last_tcm_ts = now;
+
+ /* We assume here that we don't have more than 2 vifs on 2.4GHz */
+
+ /* if the primary is low latency, it will stay primary */
+ if (data->primary_ll)
+ return;
+
+ if (data->primary_load >= data->secondary_load)
+ return;
+
+ swap(data->primary, data->secondary);
+}
+
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_bt_iterator_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ /* default smps_mode is AUTOMATIC - only used for client modes */
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ u32 bt_activity_grading;
+ int ave_rssi;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!mvmvif->ap_ibss_active)
+ return;
+ break;
+ default:
+ return;
+ }
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+ /* If channel context is invalid or not on 2.4GHz .. */
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ false);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ }
+ return;
+ }
+
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+
+ /* relax SMPS constraints for next association */
+ if (!vif->bss_conf.assoc)
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (mvmvif->phy_ctxt &&
+ (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id)))
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, bt_activity_grading, smps_mode);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
+
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is low latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
+
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+ return;
+ }
+
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
+ if (!data->primary || data->primary == chanctx_conf)
+ data->primary = chanctx_conf;
+ else if (!data->secondary)
+ /* if secondary is not NULL, it might be a GO */
+ data->secondary = chanctx_conf;
+
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+ /*
+ * don't reduce the Tx power if one of these is true:
+ * we are in LOOSE
+ * single share antenna product
+ * BT is inactive
+ * we are not associated
+ */
+ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+ mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ return;
+ }
+
+ /* try to get the avg rssi from fw */
+ ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+
+ /* if the RSSI isn't valid, pretend it is very low */
+ if (!ave_rssi)
+ ave_rssi = -100;
+ if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
+ if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
+ if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ }
+
+ /* Begin to monitor the RSSI: it may influence the reduced Tx power */
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+}
+
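+/*
+ * Recompute the BT coex channel-inhibition state: iterate over the
+ * active interfaces to pick the primary/secondary 2.4 GHz contexts,
+ * translate their channels into iwl_ci_mask entries and send
+ * BT_COEX_CI, but only when the command differs from the last one sent.
+ */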
+static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
+{
+ struct iwl_bt_iterator_data data = {
+ .mvm = mvm,
+ .notif = &mvm->last_bt_notif,
+ };
+ struct iwl_bt_coex_ci_cmd cmd = {};
+ u8 ci_bw_idx;
+
+ /* Ignore updates if we are in force mode */
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+ return;
+
+ rcu_read_lock();
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_notif_iterator, &data);
+
+ iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
+
+ if (data.primary) {
+ struct ieee80211_chanctx_conf *chan = data.primary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ } else {
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_primary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.primary_ch_phy_id =
+ cpu_to_le32(*((u16 *)data.primary->drv_priv));
+ }
+
+ if (data.secondary) {
+ struct ieee80211_chanctx_conf *chan = data.secondary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ } else {
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_secondary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.secondary_ch_phy_id =
+ cpu_to_le32(*((u16 *)data.secondary->drv_priv));
+ }
+
+ rcu_read_unlock();
+
+ /* Don't spam the fw with the same command over and over */
+ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+ memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+ }
+}
+
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
+
+ /* remember this notification for future use: rssi fluctuations */
+ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+
+ iwl_mvm_bt_coex_notif_handle(mvm);
+}
+
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum ieee80211_rssi_event_data rssi_event)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Ignore updates if we are in force mode */
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+ return;
+
+ /*
+ * RSSI update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
+ return;
+
+ /* No BT - reports should be disabled */
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
+ return;
+
+ IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+ rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+
+ /*
+ * Check if rssi is good enough for reduced Tx power, but not in loose
+ * scheme.
+ */
+ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ false);
+ else
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
+
+ if (ret)
+ IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
+
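+/*
+ * Return the A-MPDU aggregation time limit: the 4ms default, unless BT
+ * traffic is high on a tight-coex 2.4 GHz link, in which case it is
+ * cut to 1.2ms so that BT gets airtime regularly.
+ */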
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+ if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ /* tight coex, high bt traffic, reduce AGG time limit */
+ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+ return true;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return true;
+
+ /*
+ * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+ * since BT is already killed.
+ * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+ * we Tx.
+ * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
+ */
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+ return lut_type != BT_COEX_LOOSE_LUT;
+}
+
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
+{
+ /* there is no other antenna, shared antenna is always available */
+ if (mvm->cfg->bt_shared_single_ant)
+ return true;
+
+ if (ant & mvm->cfg->non_shared_ant)
+ return true;
+
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
+{
+ /* there is no other antenna, shared antenna is always available */
+ if (mvm->cfg->bt_shared_single_ant)
+ return true;
+
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum nl80211_band band)
+{
+ u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+ if (band != NL80211_BAND_2GHZ)
+ return false;
+
+ return bt_activity >= BT_LOW_TRAFFIC;
+}
+
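+/*
+ * Map a frame to a BT coex Tx priority (0 lowest, 3 highest) on
+ * 2.4 GHz: QoS data is ranked by access category, multicast data and
+ * most management/control frames get the top priority, anything else
+ * gets 0. A non-zero bt_tx_prio override takes precedence over all of
+ * the above.
+ */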
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac)
+{
+ __le16 fc = hdr->frame_control;
+ bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
+
+ if (info->band != NL80211_BAND_2GHZ)
+ return 0;
+
+ if (unlikely(mvm->bt_tx_prio))
+ return mvm->bt_tx_prio - 1;
+
+ if (likely(ieee80211_is_data(fc))) {
+ if (likely(ieee80211_is_data_qos(fc))) {
+ switch (ac) {
+ case IEEE80211_AC_BE:
+ return mplut_enabled ? 1 : 0;
+ case IEEE80211_AC_VI:
+ return mplut_enabled ? 2 : 3;
+ case IEEE80211_AC_VO:
+ return 3;
+ default:
+ return 0;
+ }
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ return 3;
+ } else {
+ return 0;
+ }
+ } else if (ieee80211_is_mgmt(fc)) {
+ return ieee80211_is_disassoc(fc) ? 0 : 3;
+ } else if (ieee80211_is_ctl(fc)) {
+ /* ignore cfend and cfendack frames as we never send those */
+ return 3;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+ iwl_mvm_bt_coex_notif_handle(mvm);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
new file mode 100644
index 000000000..d61ff66ce
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#include <linux/ieee80211.h>
+
+#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
+#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MVM_PS_SNOOZE_INTERVAL 25
+#define IWL_MVM_PS_SNOOZE_WINDOW 50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62
+#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
+#define IWL_MVM_BT_COEX_MPLUT 1
+#define IWL_MVM_BT_COEX_RRC 1
+#define IWL_MVM_BT_COEX_TTC 1
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
+#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
+#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
+#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_QUOTA_THRESHOLD 4
+#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
+#define IWL_MVM_TOF_IS_RESPONDER 0
+#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
+#define IWL_MVM_HW_CSUM_DISABLE 0
+#define IWL_MVM_PARSE_NVM 0
+#define IWL_MVM_ADWELL_ENABLE 1
+#define IWL_MVM_ADWELL_MAX_BUDGET 0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
+#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
+#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
+#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
+#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
+#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
+#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
+#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
+#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
+#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3
+#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8
+#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_MISSED_RATE_MAX 15
+#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160
+#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480
+#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160
+#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400
+#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500
+#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */
+#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */
+#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */
+#define IWL_MVM_RS_AGG_DISABLE_START 3
+#define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */
+#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
+#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
+#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
+#define IWL_MVM_ENABLE_EBS 1
+
+#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
new file mode 100644
index 000000000..868cb1195
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -0,0 +1,2164 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/addrconf.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (iwlwifi_mod_params.swcrypto)
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ mvmvif->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ mvmvif->rekey_data.valid = true;
+
+ mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, mvmvif->tentative_addrs);
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+ int i;
+
+ for (i = 0; i < IWL_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
+}
+
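+/*
+ * Return the highest CCMP PN seen for @tid across the default queue
+ * (queried from mac80211) and the driver's internal per-queue
+ * counters, so that replay detection resumes from the newest value.
+ */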
+static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
+ struct iwl_mvm_key_pn *ptk_pn,
+ struct ieee80211_key_seq *seq,
+ int tid, int queues)
+{
+ const u8 *ret = seq->ccmp.pn;
+ int i;
+
+ /* get the PN from mac80211, used on the default queue */
+ ieee80211_get_key_rx_seq(key, tid, seq);
+
+ /* and use the internal data for the other queues */
+ for (i = 1; i < queues; i++) {
+ const u8 *tmp = ptk_pn->q[i].pn[tid];
+
+ if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
+ ret = tmp;
+ }
+
+ return ret;
+}
+
+struct wowlan_key_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwl_wowlan_tkip_params_cmd *tkip;
+ bool error, use_rsc_tsc, use_tkip, configure_keys;
+ int wep_key_idx;
+};
+
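+/*
+ * Key iterator used on suspend: programs each key into the D3 firmware
+ * (WEP via the WEP_KEY command, TKIP/CCMP via iwl_mvm_set_sta_key with
+ * the PTK at offset 0) and captures the Tx/Rx sequence counters the
+ * firmware needs for replay protection while the host sleeps.
+ */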
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct wowlan_key_data *data = _data;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwl_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWL_P1K_SIZE];
+ int ret, i;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+ struct {
+ struct iwl_mvm_wep_key_cmd wep_key_cmd;
+ struct iwl_mvm_wep_key wep_key;
+ } __packed wkc = {
+ .wep_key_cmd.mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .wep_key_cmd.num_keys = 1,
+ /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+ .wep_key.key_index = key->keyidx,
+ .wep_key.key_size = key->keylen,
+ };
+
+ /*
+ * This will fail -- the key functions don't support
+ * setting pairwise WEP keys. However, that's better
+ * than silently failing WoWLAN. Or maybe not?
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ break;
+
+ memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+ if (key->keyidx == mvmvif->tx_key_idx) {
+ /* TX key must be at offset 0 */
+ wkc.wep_key.key_offset = 0;
+ } else {
+ /* others start at 1 */
+ data->wep_key_idx++;
+ wkc.wep_key.key_offset = data->wep_key_idx;
+ }
+
+ if (data->configure_keys) {
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
+ sizeof(wkc), &wkc);
+ data->error = ret != 0;
+
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
+ mutex_unlock(&mvm->mutex);
+ }
+
+ /* don't upload key again */
+ return;
+ }
+ default:
+ data->error = true;
+ return;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /*
+ * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+ * but we also shouldn't abort suspend due to that. It does have
+ * support for the IGTK key renewal, but doesn't really use the
+ * IGTK for anything. This means we could spuriously wake up or
+ * be deauthenticated, but that was considered acceptable.
+ */
+ return;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ u64 pn64;
+
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+ rx_p1ks = data->tkip->rx_uni;
+
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+
+ ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+ p1k);
+ iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they must, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u64 pn64;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ pn64 = atomic64_read(&key->tx_pn);
+ aes_tx_sc->pn = cpu_to_le64(pn64);
+ } else {
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211/our RX code use TID 0 for checking the PN.
+ */
+ if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+ const u8 *pn;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ ptk_pn = rcu_dereference_protected(
+ mvmsta->ptk_pn[key->keyidx],
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ON(!ptk_pn))
+ break;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
+ mvm->trans->num_rx_queues);
+ aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ } else {
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ if (data->configure_keys) {
+ mutex_lock(&mvm->mutex);
+ /*
+ * The D3 firmware hardcodes the key offset 0 as the key it
+ * uses to transmit packets to the AP, i.e. the PTK.
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
+ } else {
+ /*
+ * firmware only supports TSC/RSC for a single key,
+ * so if there are multiple keep overwriting them
+ * with new ones -- this relies on mac80211 doing
+ * list_add_tail().
+ */
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
+ }
+ mutex_unlock(&mvm->mutex);
+ data->error = ret != 0;
+ }
+}
+
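+/*
+ * Upload the user-configured WoWLAN wakeup patterns. The command is
+ * allocated dynamically since the number of patterns is variable, and
+ * is sent with IWL_HCMD_DFL_NOCOPY to avoid copying the buffer again.
+ */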
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
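+/*
+ * After the D3 firmware is loaded the runtime contexts are gone, so
+ * manually re-add the minimal set needed for WoWLAN: the PHY context,
+ * the MAC context, the binding, the AP station and a full-time quota.
+ */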
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
+ u8 chains_static, chains_dynamic;
+ struct cfg80211_chan_def chandef;
+ int ret, i;
+ struct iwl_binding_cmd binding_cmd = {};
+ struct iwl_time_quota_cmd quota_cmd = {};
+ struct iwl_time_quota_data *quota;
+ u32 status;
+ int size;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(binding_cmd);
+ if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
+
+ /* add back the PHY */
+ if (WARN_ON(!mvmvif->phy_ctxt))
+ return -EINVAL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!ctx)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ chandef = ctx->def;
+ chains_static = ctx->rx_chains_static;
+ chains_dynamic = ctx->rx_chains_dynamic;
+ rcu_read_unlock();
+
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+ chains_static, chains_dynamic);
+ if (ret)
+ return ret;
+
+ /* add back the MAC */
+ mvmvif->uploaded = false;
+
+ if (WARN_ON(!vif->bss_conf.assoc))
+ return -EINVAL;
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ return ret;
+
+ /* add back binding - XXX refactor? */
+ binding_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ binding_cmd.phy =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+ binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ size, &binding_cmd, &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ return -EIO;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
+ if (ret)
+ return ret;
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (ret)
+ return ret;
+
+ /* and some quota */
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+ quota->id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+ mvmvif->phy_ctxt->color));
+ quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+ for (i = 1; i < MAX_BINDINGS; i++) {
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+ quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+ iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+ if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+ IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
+ return 0;
+}
+
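+/*
+ * Query the firmware for the non-QoS sequence counter. The firmware
+ * reports the next seqno it would use, so 0x10 (one step, since the
+ * sequence number occupies bits 4-15 of the seq_ctrl field) is
+ * subtracted to get the last used value.
+ */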
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NON_QOS_TX_COUNTER_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int err;
+ u32 size;
+
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ if (err)
+ return err;
+
+ size = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (size < sizeof(__le16)) {
+ err = -EINVAL;
+ } else {
+ err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+ /* firmware returns next, not last-used seqno */
+ err = (u16) (err - 0x10);
+ }
+
+ iwl_free_resp(&cmd);
+ return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .value = cpu_to_le16(mvmvif->seqno),
+ };
+
+ /* return if called during restart, not resume from D3 */
+ if (!mvmvif->seqno_valid)
+ return;
+
+ mvmvif->seqno_valid = false;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
+ sizeof(query_cmd), &query_cmd))
+ IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
+{
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+ iwl_mvm_stop_device(mvm);
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+ * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ /* the fw is reset, so all the keys are cleared */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
+ mvm->gtk_ivlen = 0;
+ mvm->gtk_icvlen = 0;
+
+ return iwl_mvm_load_d3_fw(mvm);
+}
+
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ int ret;
+ struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+
+ /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
+
+ wowlan_config_cmd->is_11n_connection =
+ ap_sta->ht_cap.ht_supported;
+ wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+ ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+ /* Query the last used seqno and set it */
+ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+ if (ret < 0)
+ return ret;
+
+ wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+
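+ /* translate cfg80211 wakeup triggers into firmware wakeup filter flags */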
+ if (wowlan->disconnect)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ if (wowlan->tcp) {
+ /*
+ * Set the "link change" (really "link lost") flag as well
+ * since that implies losing the TCP connection.
+ */
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ }
+
+ if (wowlan->any) {
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+ }
+
+ return 0;
+}
+
+static void
+iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *data)
+{
+ struct ieee80211_sta *ap_sta;
+
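+ /*
+ * use the RCU-safe key iterator here since in d0i3 we can't
+ * take the mutexes the regular iterator would need, and only
+ * iterate if the AP station is still around
+ */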
+ rcu_read_lock();
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out;
+
+ ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
+out:
+ rcu_read_unlock();
+}
+
+int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool d0i3,
+ u32 cmd_flags)
+{
+ struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+ struct wowlan_key_data key_data = {
+ .configure_keys = !d0i3 && !unified,
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ /*
+ * if we have to configure keys, call ieee80211_iter_keys(),
+ * as we need non-atomic context in order to take the
+ * required locks.
+ * for the d0i3 we can't use ieee80211_iter_keys(), as
+ * taking (almost) any mutex might result in deadlock.
+ */
+ if (!d0i3) {
+ /*
+ * Note that currently we don't propagate cmd_flags
+ * to the iterator. In case of key_data.configure_keys,
+ * all the configured commands are SYNC, and
+ * iwl_mvm_wowlan_program_keys() will take care of
+ * locking/unlocking mvm->mutex.
+ */
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_wowlan_program_keys,
+ &key_data);
+ } else {
+ iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
+ iwl_mvm_wowlan_program_keys,
+ &key_data);
+ }
+
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_TSC_RSC_PARAM, cmd_flags,
+ sizeof(*key_data.rsc_tsc),
+ key_data.rsc_tsc);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip &&
+ !fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_TKIP_PARAM,
+ cmd_flags, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ /* configure rekey data only if offloaded rekey is supported (d3) */
+ if (mvmvif->rekey_data.valid && !d0i3) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+ NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+ NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
+ sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ ret = 0;
+out:
+ kfree(key_data.rsc_tsc);
+ return ret;
+}
+
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+ if (ret)
+ return ret;
+ }
+
+ if (!iwlwifi_mod_params.swcrypto) {
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&mvm->mutex);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
+ CMD_ASYNC);
+ mutex_lock(&mvm->mutex);
+ if (ret)
+ return ret;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(*wowlan_config_cmd),
+ wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_send_patterns(mvm, wowlan);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+}
+
+static int
+iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
+ } else {
+ /* In theory, we wouldn't have to stop a running sched
+ * scan in order to start another one (for
+ * net-detect). But in practice this doesn't seem to
+ * work properly, so stop any running sched_scan now.
+ */
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ if (ret)
+ return ret;
+ }
+
+ /* rfkill release can be either for wowlan or netdetect */
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+ IWL_MVM_SCAN_NETDETECT);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
+ return -EBUSY;
+
+ /* save the sched scan matchsets... */
+ if (nd_config->n_match_sets) {
+ mvm->nd_match_sets = kmemdup(nd_config->match_sets,
+ sizeof(*nd_config->match_sets) *
+ nd_config->n_match_sets,
+ GFP_KERNEL);
+ if (mvm->nd_match_sets)
+ mvm->n_nd_match_sets = nd_config->n_match_sets;
+ }
+
+ /* ...and the sched scan channels for later reporting */
+ mvm->nd_channels = kmemdup(nd_config->channels,
+ sizeof(*nd_config->channels) *
+ nd_config->n_channels,
+ GFP_KERNEL);
+ if (mvm->nd_channels)
+ mvm->n_nd_channels = nd_config->n_channels;
+
+ return 0;
+}
+
+static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
+{
+ kfree(mvm->nd_match_sets);
+ mvm->nd_match_sets = NULL;
+ mvm->n_nd_match_sets = 0;
+ kfree(mvm->nd_channels);
+ mvm->nd_channels = NULL;
+ mvm->n_nd_channels = 0;
+}
+
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan,
+ bool test)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = NULL;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ /*
+ * Program the minimum sleep time to 10 seconds, as many
+ * platforms have issues processing a wakeup signal while
+ * still being in the process of suspending.
+ */
+ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+ };
+ struct iwl_host_cmd d3_cfg_cmd = {
+ .id = D3_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data[0] = &d3_cfg_cmd_data,
+ .len[0] = sizeof(d3_cfg_cmd_data),
+ };
+ int ret;
+ int len __maybe_unused;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ if (!wowlan) {
+ /*
+ * mac80211 shouldn't get here, but for D3 test
+ * it doesn't warrant a warning
+ */
+ WARN_ON(!test);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif)) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
+ /* if we're not associated, this must be netdetect */
+ if (!wowlan->nd_config) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_netdetect_config(
+ mvm, wowlan, wowlan->nd_config, vif);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = true;
+ } else {
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out_noreset;
+ ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = false;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->d3_wake_sysassert)
+ d3_cfg_cmd_data.wakeup_flags |=
+ cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
+#endif
+
+ /*
+ * TODO: this is needed because the firmware is not stopping
+ * the recording automatically before entering D3. This can
+ * be removed once the FW starts doing that.
+ */
+ iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
+ /* must be last -- this switches firmware state */
+ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+ if (ret)
+ goto out;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
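+ /*
+ * the response carries a pointer to the PME state word that
+ * the d3_test debugfs hook polls for a wakeup indication
+ */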
+ len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+ if (len >= sizeof(u32)) {
+ mvm->d3_test_pme_ptr =
+ le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
+ }
+#endif
+ iwl_free_resp(&d3_cfg_cmd);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ out:
+ if (ret < 0) {
+ iwl_mvm_free_nd(mvm);
+
+ if (!unified_image) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ if (mvm->fw_restart > 0) {
+ mvm->fw_restart--;
+ ieee80211_restart_hw(mvm->hw);
+ }
+ }
+ }
+ out_noreset:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait wait_d3;
+ static const u16 d3_notif[] = { D3_CONFIG_CMD };
+ int ret;
+
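+ /* wait for the firmware to acknowledge D3 entry with D3_CONFIG_CMD */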
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
+ d3_notif, ARRAY_SIZE(d3_notif),
+ NULL, NULL);
+
+ ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
+ if (ret)
+ goto remove_notif;
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
+ WARN_ON_ONCE(ret);
+ return ret;
+
+remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3);
+ return ret;
+}
+
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_trans *trans = mvm->trans;
+ int ret;
+
+ /* make sure the d0i3 exit work is not pending */
+ flush_work(&mvm->d0i3_exit_work);
+ iwl_mvm_pause_tcm(mvm, true);
+
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+
+ ret = iwl_trans_suspend(trans);
+ if (ret)
+ return ret;
+
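+ /*
+ * with the "any" trigger, keep the firmware running in D0i3
+ * instead of reprogramming a full D3/WoWLAN configuration
+ */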
+ if (wowlan->any) {
+ trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+ if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+ ret = iwl_mvm_enter_d0i3_sync(mvm);
+
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ iwl_trans_d3_suspend(trans, false, false);
+
+ return 0;
+ }
+
+ trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
+ return __iwl_mvm_suspend(hw, wowlan, false);
+}
+
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+ u16 pattern_number;
+ u16 qos_seq_ctr[8];
+ u32 wakeup_reasons;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ const u8 *wake_packet;
+};
+
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
+{
+ struct sk_buff *pkt = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ u32 reasons = status->wakeup_reasons;
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ wakeup_report = NULL;
+ goto report;
+ }
+
+ pm_wakeup_event(mvm->dev, 0);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+ wakeup.pattern_idx =
+ status->pattern_number;
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+ wakeup.gtk_rekey_failure = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+ wakeup.eap_identity_req = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+ wakeup.tcp_connlost = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+ wakeup.tcp_nomoretokens = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+ wakeup.tcp_match = true;
+
+ if (status->wake_packet_bufsize) {
+ int pktsize = status->wake_packet_bufsize;
+ int pktlen = status->wake_packet_length;
+ const u8 *pktdata = status->wake_packet;
+ struct ieee80211_hdr *hdr = (void *)pktdata;
+ int truncated = pktlen - pktsize;
+
+ /* this would be a firmware bug */
+ if (WARN_ON_ONCE(truncated < 0))
+ truncated = 0;
+
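+ /* for data frames, strip the IV/ICV and convert to 802.3 before reporting */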
+ if (ieee80211_is_data(hdr->frame_control)) {
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int ivlen = 0, icvlen = 4; /* also FCS */
+
+ pkt = alloc_skb(pktsize, GFP_KERNEL);
+ if (!pkt)
+ goto report;
+
+ skb_put_data(pkt, pktdata, hdrlen);
+ pktdata += hdrlen;
+ pktsize -= hdrlen;
+
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ /*
+ * This is unlocked and using gtk_i(c)vlen,
+ * but since everything is under RTNL still
+ * that's not really a problem - changing
+ * it would be difficult.
+ */
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ ivlen = mvm->gtk_ivlen;
+ icvlen += mvm->gtk_icvlen;
+ } else {
+ ivlen = mvm->ptk_ivlen;
+ icvlen += mvm->ptk_icvlen;
+ }
+ }
+
+ /* if truncated, FCS/ICV is (partially) gone */
+ if (truncated >= icvlen) {
+ truncated -= icvlen;
+ icvlen = 0;
+ } else {
+ icvlen -= truncated;
+ truncated = 0;
+ }
+
+ pktsize -= ivlen + icvlen;
+ pktdata += ivlen;
+
+ skb_put_data(pkt, pktdata, pktsize);
+
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ goto report;
+ wakeup.packet = pkt->data;
+ wakeup.packet_present_len = pkt->len;
+ wakeup.packet_len = pkt->len - truncated;
+ wakeup.packet_80211 = false;
+ } else {
+ int fcslen = 4;
+
+ if (truncated >= 4) {
+ truncated -= 4;
+ fcslen = 0;
+ } else {
+ fcslen -= truncated;
+ truncated = 0;
+ }
+ pktsize -= fcslen;
+ wakeup.packet = status->wake_packet;
+ wakeup.packet_present_len = pktsize;
+ wakeup.packet_len = pktlen - truncated;
+ wakeup.packet_80211 = true;
+ }
+ }
+
+ report:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ kfree_skb(pkt);
+}
+
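+ /*
+ * expand the firmware's 64-bit PN into the 48-bit big-endian
+ * byte array that mac80211 uses for CCMP receive counters
+ */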
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ u64 pn;
+
+ pn = le64_to_cpu(sc->pn);
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+ seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ON(!ptk_pn))
+ return;
+
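+ /*
+ * mirror the new RX PN into mac80211 and into the
+ * per-queue counters used by the multi-RX-queue code
+ */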
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct ieee80211_key_seq seq = {};
+ int i;
+
+ iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ for (i = 1; i < mvm->trans->num_rx_queues; i++)
+ memcpy(ptk_pn->q[i].pn[tid],
+ seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ }
+ } else {
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+ }
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+ struct ieee80211_key_conf *key)
+{
+ int tid;
+
+ BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+ for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+ struct ieee80211_key_seq seq = {};
+
+ iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+ ieee80211_set_key_rx_seq(key, tid, &seq);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status *status)
+{
+ union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_wowlan_status *status;
+ void *last_gtk;
+ u32 cipher;
+ bool find_phase, unhandled_cipher;
+ int num_keys;
+};
+
+static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we support these */
+ break;
+ default:
+ /* everything else (even CMAC for MFP) - disconnect from AP */
+ data->unhandled_cipher = true;
+ return;
+ }
+
+ data->num_keys++;
+
+ /*
+ * pairwise key - update sequence counters only;
+ * note that this assumes no TDLS sessions are active
+ */
+ if (sta) {
+ struct ieee80211_key_seq seq = {};
+ union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+ if (data->find_phase)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
+ sta, key);
+ atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+ iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+ atomic64_set(&key->tx_pn,
+ (u64)seq.tkip.iv16 |
+ ((u64)seq.tkip.iv32 << 16));
+ break;
+ }
+
+ /* that's it for this key */
+ return;
+ }
+
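+ /* in the find phase, only record the last GTK and its cipher */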
+ if (data->find_phase) {
+ data->last_gtk = key;
+ data->cipher = key->cipher;
+ return;
+ }
+
+ if (data->status->num_of_gtk_rekeys)
+ ieee80211_remove_key(key);
+ else if (data->last_gtk == key)
+ iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .mvm = mvm,
+ .status = status,
+ };
+ u32 disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+
+ if (!status || !vif->bss_conf.bssid)
+ return false;
+
+ if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+ return false;
+
+ /* find last GTK that we used initially, if any */
+ gtkdata.find_phase = true;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_keys, &gtkdata);
+ /* not trying to keep connections with MFP/unhandled ciphers */
+ if (gtkdata.unhandled_cipher)
+ return false;
+ if (!gtkdata.num_keys)
+ goto out;
+ if (!gtkdata.last_gtk)
+ return false;
+
+ /*
+ * invalidate all other GTKs that might still exist and update
+ * the one that we used
+ */
+ gtkdata.find_phase = false;
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_keys, &gtkdata);
+
+ if (status->num_of_gtk_rekeys) {
+ struct ieee80211_key_conf *key;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {
+ .conf.cipher = gtkdata.cipher,
+ .conf.keyidx = status->gtk.key_index,
+ };
+
+ switch (gtkdata.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key,
+ WLAN_KEY_LEN_CCMP);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+ /* leave TX MIC key zeroed, we don't use it anyway */
+ memcpy(conf.conf.key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ status->gtk.tkip_mic_key, 8);
+ break;
+ }
+
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ if (IS_ERR(key))
+ return false;
+ iwl_mvm_set_key_rx_seq(mvm, key, status);
+ }
+
+ if (status->num_of_gtk_rekeys) {
+ __be64 replay_ctr =
+ cpu_to_be64(le64_to_cpu(status->replay_ctr));
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ }
+
+out:
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+ return true;
+}
+
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table[0];
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status *status, *fw_status;
+ int ret, len, status_size;
+
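+ /* check the error log first - if the firmware asserted, the status is unusable */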
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
+ err_info.valid, err_info.error_id);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ return ERR_PTR(-EIO);
+ }
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ status_size = sizeof(*fw_status);
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < status_size) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ fw_status = ERR_PTR(-EIO);
+ goto out_free_resp;
+ }
+
+ status = (void *)cmd.resp_pkt->data;
+ if (len != (status_size +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ fw_status = ERR_PTR(-EIO);
+ goto out_free_resp;
+ }
+
+ fw_status = kmemdup(status, len, GFP_KERNEL);
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return fw_status;
+}
+
+/* releases the MVM mutex */
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status *fw_status;
+ int i;
+ bool keep;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (IS_ERR_OR_NULL(fw_status))
+ goto out_unlock;
+
+ status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(fw_status->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(fw_status->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(fw_status->wake_packet_bufsize);
+ status.wake_packet = fw_status->wake_packet;
+
+ /* still at hard-coded place 0 for D3 image */
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ if (!mvm_ap_sta)
+ goto out_free;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = status.qos_seq_ctr[i];
+ /* firmware stores last-used value, we store next value */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+
+ /* now we have all the data we need, unlock to avoid mac80211 issues */
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
+
+ kfree(fw_status);
+ return keep;
+
+out_free:
+ kfree(fw_status);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ return false;
+}
+
+void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status)
+{
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .mvm = mvm,
+ .status = status,
+ };
+
+ /*
+ * rekey handling requires taking locks that can't be taken now.
+ * however, d0i3 doesn't offload rekey, so we're fine.
+ */
+ if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
+ return;
+
+ /* find last GTK that we used initially, if any */
+ gtkdata.find_phase = true;
+ iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+
+ gtkdata.find_phase = false;
+ iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+}
+
+struct iwl_mvm_nd_query_results {
+ u32 matched_profiles;
+ struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+};
+
+static int
+iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *results)
+{
+ struct iwl_scan_offload_profiles_query *query;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+ return ret;
+ }
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < sizeof(*query)) {
+ IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ query = (void *)cmd.resp_pkt->data;
+
+ results->matched_profiles = le32_to_cpu(query->matched_profiles);
+ memcpy(results->matches, query->matches, sizeof(results->matches));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+#endif
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_wowlan_nd_info *net_detect = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_mvm_nd_query_results query;
+ struct iwl_wowlan_status *fw_status;
+ unsigned long matched_profiles;
+ u32 reasons = 0;
+ int i, j, n_matches, ret;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (!IS_ERR_OR_NULL(fw_status)) {
+ reasons = le32_to_cpu(fw_status->wakeup_reasons);
+ kfree(fw_status);
+ }
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ ret = iwl_mvm_netdetect_query_results(mvm, &query);
+ if (ret || !query.matched_profiles) {
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = query.matched_profiles;
+ if (mvm->n_nd_match_sets) {
+ n_matches = hweight_long(matched_profiles);
+ } else {
+ IWL_ERR(mvm, "no net detect match information available\n");
+ n_matches = 0;
+ }
+
+ net_detect = kzalloc(sizeof(*net_detect) +
+ (n_matches * sizeof(net_detect->matches[0])),
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct iwl_scan_offload_profile_match *fw_match;
+ struct cfg80211_wowlan_nd_match *match;
+ int idx, n_channels = 0;
+
+ fw_match = &query.matches[i];
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
+ n_channels += hweight8(fw_match->matching_channels[j]);
+
+ match = kzalloc(sizeof(*match) +
+ (n_channels * sizeof(*match->channels)),
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
+
+ net_detect->matches[net_detect->n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = mvm->n_nd_match_sets - i - 1;
+ match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (mvm->n_nd_channels < n_channels)
+ continue;
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
+ if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[j]->center_freq;
+ }
+
+out_report_nd:
+ wakeup.net_detect = net_detect;
+out:
+ iwl_mvm_free_nd(mvm);
+
+ mutex_unlock(&mvm->mutex);
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+
+ if (net_detect) {
+ for (i = 0; i < net_detect->n_matches; i++)
+ kfree(net_detect->matches[i]);
+ kfree(net_detect);
+ }
+}
+
+static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+ u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+
+ if (!mvm->store_d3_resume_sram)
+ return;
+
+ if (!mvm->d3_resume_sram) {
+ mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
+ if (!mvm->d3_resume_sram)
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
+#endif
+}
+
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_resume_disconnect(vif);
+}
+
+static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+{
+ struct ieee80211_vif *vif = NULL;
+ int ret = 1;
+ enum iwl_d3_status d3_status;
+ bool keep = false;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+ bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+
+ mutex_lock(&mvm->mutex);
+
+ /* get the BSS vif pointer again */
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ goto err;
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
+ if (ret)
+ goto err;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ goto err;
+ }
+
+ /* query SRAM first in case we want event logging */
+ iwl_mvm_read_d3_sram(mvm);
+
+ if (d0i3_first) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (ret < 0) {
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ goto err;
+ }
+ }
+
+ /*
+ * Query the current location and source from the D3 firmware so we
+ * can play it back when we re-initialize the D0 firmware
+ */
+ iwl_mvm_update_changed_regdom(mvm);
+
+ if (!unified_image)
+ /* Re-configure default SAR profile */
+ iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+ if (mvm->net_detect) {
+ /* If this is a non-unified image, we restart the FW,
+ * so no need to stop the netdetect scan. If that
+ * fails, continue and try to get the wake-up reasons,
+ * but trigger a HW restart by keeping a failure code
+ * in ret.
+ */
+ if (unified_image)
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
+
+ iwl_mvm_query_netdetect_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+ } else {
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
+ /* has unlocked the mutex, so skip that */
+ goto out_iterate;
+ }
+
+err:
+ iwl_mvm_free_nd(mvm);
+ mutex_unlock(&mvm->mutex);
+
+out_iterate:
+ if (!test)
+ ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+
+out:
+ /* no need to reset the device in unified images, if successful */
+ if (unified_image && !ret) {
+ /* nothing else to do if we already sent D0I3_END_CMD */
+ if (d0i3_first)
+ return 0;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
+ return 0;
+ }
+
+ /*
+ * Reconfigure the device in one of the following cases:
+ * 1. We are not using a unified image
+ * 2. We are using a unified image but had an error while exiting D3
+ */
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+ set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+ /*
+ * When switching images we return 1, which causes mac80211
+ * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+ * This type of reconfig calls iwl_mvm_restart_complete(),
+ * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+ * to take the reference here.
+ */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ return 1;
+}
+
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
+{
+ iwl_trans_resume(mvm->trans);
+
+ return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+ bool exit_now;
+ enum iwl_d3_status d3_status;
+ struct iwl_trans *trans = mvm->trans;
+
+ iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+ /*
+ * make sure to clear D0I3_DEFER_WAKEUP before
+ * calling iwl_trans_resume(), which might wait
+ * for d0i3 exit completion.
+ */
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now) {
+ IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+ _iwl_mvm_exit_d0i3(mvm);
+ }
+
+ iwl_trans_resume(trans);
+
+ if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+ int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
+
+ if (ret)
+ return ret;
+ /*
+ * d0i3 exit will be deferred until reconfig_complete;
+ * make sure we are out of d0i3 by then.
+ */
+ }
+ return 0;
+}
+
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ ret = iwl_mvm_resume_d0i3(mvm);
+ else
+ ret = iwl_mvm_resume_d3(mvm);
+
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+ iwl_mvm_resume_tcm(mvm);
+
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
+ return ret;
+}
+
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int err;
+
+ if (mvm->d3_test_active)
+ return -EBUSY;
+
+ file->private_data = inode->i_private;
+
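+ /* quiesce TX before faking the platform suspend */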
+ ieee80211_stop_queues(mvm->hw);
+ synchronize_net();
+
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
+ iwl_mvm_pause_tcm(mvm, true);
+
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+
+ /* start pseudo D3 */
+ rtnl_lock();
+ err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ rtnl_unlock();
+ if (err > 0)
+ err = -EINVAL;
+ if (err) {
+ ieee80211_wake_queues(mvm->hw);
+ return err;
+ }
+ mvm->d3_test_active = true;
+ mvm->keep_vif = NULL;
+ return 0;
+}
+
+static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u32 pme_asserted;
+
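+ /* poll the firmware's PME state word until wakeup (or a signal) */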
+ while (true) {
+ /* read pme_ptr if available */
+ if (mvm->d3_test_pme_ptr) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
+ break;
+ }
+
+ if (msleep_interruptible(100))
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (_data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_connection_loss(vif);
+}
+
+static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ mvm->d3_test_active = false;
+
+ rtnl_lock();
+ __iwl_mvm_resume(mvm, true);
+ rtnl_unlock();
+
+ iwl_mvm_resume_tcm(mvm);
+
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ if (!unified_image) {
+ int remaining_time = 10;
+
+ ieee80211_restart_hw(mvm->hw);
+
+ /* wait for restart and disconnect all interfaces */
+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ remaining_time > 0) {
+ remaining_time--;
+ msleep(1000);
+ }
+
+ if (remaining_time == 0)
+ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
+
+ ieee80211_wake_queues(mvm->hw);
+
+ return 0;
+}
+
+const struct file_operations iwl_dbgfs_d3_test_ops = {
+ .llseek = no_llseek,
+ .open = iwl_mvm_d3_test_open,
+ .read = iwl_mvm_d3_test_read,
+ .release = iwl_mvm_d3_test_release,
+};
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000..5287f21d7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,1573 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw/api/tof.h"
+#include "debugfs.h"
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_dbgfs_pm_mask param, int val)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+ dbgfs_pm->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ int dtimper = vif->bss_conf.dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive=%d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ dbgfs_pm->keep_alive_seconds = val;
+ break;
+ }
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ break;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ break;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+ IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+ dbgfs_pm->uapsd_misbehaving = val;
+ break;
+ case MVM_DEBUGFS_PM_USE_PS_POLL:
+ IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
+ dbgfs_pm->use_ps_poll = val;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_pm_mask param;
+ int val, ret;
+
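+ /* parse a single "name=value" setting written to the debugfs file */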
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX ||
+ val < POWER_LPRX_RSSI_THRESHOLD_MIN)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+ } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+ } else if (!strncmp("use_ps_poll=", buf, 12)) {
+ if (sscanf(buf + 12, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_USE_PS_POLL;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mac(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+ vif->bss_conf.txpower);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 ap_sta_id;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ ap_sta_id = mvmvif->ap_sta_id;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
+ pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+ mvmvif->id, mvmvif->color);
+ pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+ vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+ mvm->tcm.result.load[mvmvif->id]);
+ pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+ for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+ i, mvmvif->queue_params[i].txop,
+ mvmvif->queue_params[i].cw_min,
+ mvmvif->queue_params[i].cw_max,
+ mvmvif->queue_params[i].aifs,
+ mvmvif->queue_params[i].uapsd);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ ap_sta_id != IWL_MVM_INVALID_STA) {
+ struct iwl_mvm_sta *mvm_sta;
+
+ mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+ if (mvm_sta) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "ap_sta_id %d - reduced Tx power %d\n",
+ ap_sta_id,
+ mvm_sta->bt_reduced_txpower);
+ }
+ }
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "idle rx chains %d, active rx chains: %d\n",
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ rcu_read_unlock();
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ dbgfs_bf->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ break;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ break;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ break;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_bf_mask param;
+ int value, ret = 0;
+
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ else
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+ };
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+ else
+ cmd.bf_enable_beacon_filter = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ le32_to_cpu(cmd.bf_debug_flag));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+ le32_to_cpu(cmd.bf_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+ le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
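+/*
+ * If @buf starts with the "name=" prefix given in @name, return a
+ * pointer to the value part that follows it; otherwise return NULL.
+ */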
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+ int len = strlen(name);
+
+ return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
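+/*
+ * Report the current difference between the OS clock and the device's
+ * GP2 timer, both expressed in microseconds.
+ */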
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 curr_gp2;
+ u64 curr_os;
+ s64 diff;
+ char buf[64];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ mutex_unlock(&mvm->mutex);
+
+ do_div(curr_os, NSEC_PER_USEC);
+ diff = curr_os - curr_gp2;
+ pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
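+/*
+ * Each write updates one "key=value" field of the cached ToF (time of
+ * flight) configuration; writing a non-zero "send_tof_cfg=" pushes the
+ * accumulated configuration to the firmware.
+ */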
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = -EINVAL;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tof_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.tof_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.one_sided_disabled = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_debug_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("is_buf=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.tof_cfg.is_buf_required = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_config_cmd(mvm);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.tof_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+ cmd->tof_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+ cmd->one_sided_disabled);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+ cmd->is_debug_mode);
+ pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+ cmd->is_buf_required);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
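+/*
+ * Like the ToF config file above: each write sets one field of the
+ * cached responder configuration (e.g. "channel_num=36"), and writing
+ * a non-zero "send_responder_cfg=" sends the command to the firmware.
+ */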
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("burst_period=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (!ret)
+ mvm->tof_data.responder_cfg.burst_period =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("burst_duration=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.burst_duration = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("abort_responder=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.abort_responder = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("get_ch_est=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.get_ch_est = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("channel_num=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.channel_num = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bandwidth=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.bandwidth = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("rate=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.rate = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("bssid=", buf);
+ if (data) {
+ u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("toa_offset=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.toa_offset =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("center_freq=", buf);
+ if (data) {
+ struct iwl_tof_responder_config_cmd *cmd =
+ &mvm->tof_data.responder_cfg;
+
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ enum nl80211_band band = (cmd->channel_num <= 14) ?
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
+ struct ieee80211_channel chn = {
+ .band = band,
+ .center_freq = ieee80211_channel_to_frequency(
+ cmd->channel_num, band),
+ };
+ struct cfg80211_chan_def chandef = {
+ .chan = &chn,
+ .center_freq1 =
+ ieee80211_channel_to_frequency(value,
+ band),
+ };
+
+ cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
+ }
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_per_burst = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("asap_mode=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.responder_cfg.asap_mode = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_responder_config_cmd *cmd;
+
+ cmd = &mvm->tof_data.responder_cfg;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+ le16_to_cpu(cmd->burst_period));
+ pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+ cmd->burst_duration);
+ pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+ cmd->bandwidth);
+ pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+ cmd->channel_num);
+ pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+ cmd->ctrl_ch_position);
+ pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+ cmd->bssid);
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+ cmd->num_of_burst_exp);
+ pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+ pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+ cmd->abort_responder);
+ pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+ cmd->get_ch_est);
+ pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+ cmd->recv_sta_req_params);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+ cmd->ftm_per_burst);
+ pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+ cmd->ftm_resp_ts_avail);
+ pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+ cmd->asap_mode);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msecs = %d\n",
+ le16_to_cpu(cmd->tsf_timer_offset_msecs));
+ pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+ le16_to_cpu(cmd->toa_offset));
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
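+/*
+ * Build up a ranging request one "key=value" write at a time; the
+ * request is only sent to the firmware once a non-zero
+ * "send_range_request=" is written.
+ */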
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("request_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.request_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("initiator=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.initiator = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.one_sided_los_disable = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("req_timeout=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.req_timeout = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("report_policy=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.report_policy = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_random=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.macaddr_random = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("num_of_ap=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req.num_of_ap = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_template=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+ if (data) {
+ u8 mac[ETH_ALEN];
+
+ if (!mac_pton(data, mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ap=", buf);
+ if (data) {
+ struct iwl_tof_range_req_ap_entry ap = {};
+ int size = sizeof(struct iwl_tof_range_req_ap_entry);
+ u16 burst_period;
+ u8 *mac = ap.bssid;
+ unsigned int i;
+
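+ /*
+ * 20 fields: AP index, channel, bandwidth, ctrl channel
+ * position, BSSID (xx:xx:xx:xx:xx:xx), measure type, number
+ * of bursts, burst period, samples per burst, retries per
+ * sample, TSF delta, location request, ASAP mode, dynamic
+ * ACK enable and RSSI.
+ */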
+ if (sscanf(data, "%u %hhd %hhd %hhd"
+ "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+ "%hhd %hhd %hd"
+ "%hhd %hhd %d"
+ "%hhx %hhd %hhd %hhd",
+ &i, &ap.channel_num, &ap.bandwidth,
+ &ap.ctrl_ch_position,
+ mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+ &ap.measure_type, &ap.num_of_bursts,
+ &burst_period,
+ &ap.samples_per_burst, &ap.retries_per_sample,
+ &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+ &ap.enable_dyn_ack, &ap.rssi) != 20) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (i >= IWL_MVM_TOF_MAX_APS) {
+ IWL_ERR(mvm, "Invalid AP index %d\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ap.burst_period = cpu_to_le16(burst_period);
+
+ memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_request=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value)
+ ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+ goto out;
+ }
+
+ ret = -EINVAL;
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_cmd *cmd;
+ int i;
+
+ cmd = &mvm->tof_data.range_req;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+ cmd->initiator);
+ pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+ cmd->one_sided_los_disable);
+ pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+ cmd->req_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+ cmd->report_policy);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+ cmd->macaddr_random);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+ cmd->macaddr_template);
+ pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+ cmd->macaddr_mask);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+ cmd->num_of_ap);
+ for (i = 0; i < cmd->num_of_ap; i++) {
+ struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: channel_num=%hhd bw=%hhd"
+ " control=%hhd bssid=%pM type=%hhd"
+ " num_of_bursts=%hhd burst_period=%d ftm=%hhd"
+ " retries=%hhd tsf_delta=%d"
+ " tsf_delta_direction=%hhd location_req=0x%hhx"
+ " asap=%hhd enable=%hhd rssi=%hhd\n",
+ i, ap->channel_num, ap->bandwidth,
+ ap->ctrl_ch_position, ap->bssid,
+ ap->measure_type, ap->num_of_bursts,
+ le16_to_cpu(ap->burst_period), ap->samples_per_burst,
+ ap->retries_per_sample, ap->tsf_delta,
+ ap->tsf_delta_direction,
+ ap->location_req, ap->asap_mode,
+ ap->enable_dyn_ack, ap->rssi);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+ cpu_to_le16(value);
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.min_delta_ftm = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+ value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value)
+ ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+ goto out;
+ }
+
+ ret = -EINVAL;
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_tof_range_req_ext_cmd *cmd;
+
+ cmd = &mvm->tof_data.range_req_ext;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tsf_timer_offset_msec = %d\n",
+ le16_to_cpu(cmd->tsf_timer_offset_msec));
+ pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
+ cmd->min_delta_ftm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw20M = %hhd\n",
+ cmd->ftm_format_and_bw20M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw40M = %hhd\n",
+ cmd->ftm_format_and_bw40M);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ftm_format_and_bw80M = %hhd\n",
+ cmd->ftm_format_and_bw80M);
+
+ mutex_unlock(&mvm->mutex);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+ char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 value;
+ int abort_id, ret = 0;
+ char *data;
+
+ mutex_lock(&mvm->mutex);
+
+ data = iwl_dbgfs_is_match("abort_id=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0)
+ mvm->tof_data.last_abort_id = value;
+ goto out;
+ }
+
+ data = iwl_dbgfs_is_match("send_range_abort=", buf);
+ if (data) {
+ ret = kstrtou32(data, 10, &value);
+ if (ret == 0 && value) {
+ abort_id = mvm->tof_data.last_abort_id;
+ ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[32];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ int last_abort_id;
+
+ mutex_lock(&mvm->mutex);
+ last_abort_id = mvm->tof_data.last_abort_id;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+ last_abort_id);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char *buf;
+ int pos = 0;
+ const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+ struct iwl_tof_range_rsp_ntfy *cmd;
+ int i, ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ cmd = &mvm->tof_data.range_resp;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+ cmd->request_id);
+ pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+ cmd->request_status);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+ cmd->last_in_batch);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+ cmd->num_of_aps);
+ for (i = 0; i < cmd->num_of_aps; i++) {
+ struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+ " rtt=%d rtt_var=%d rtt_spread=%d"
+ " rssi=%hhd rssi_spread=%hhd"
+ " range=%d range_var=%d"
+ " time_stamp=%d\n",
+ i, ap->bssid, ap->measure_status,
+ ap->measure_bw,
+ ap->rtt, ap->rtt_variance, ap->rtt_spread,
+ ap->rssi, ap->rssi_spread, ap->range,
+ ap->range_variance, ap->timestamp);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
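+/*
+ * Set (1) or clear (0) the debugfs low-latency source for this vif;
+ * the traffic and vendor-command sources are tracked separately, as
+ * the read handler below shows.
+ */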
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[30] = {};
+ int len;
+
+ len = scnprintf(buf, sizeof(buf) - 1,
+ "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+ !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
+ !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
+ !!(mvmvif->low_latency & LOW_LATENCY_VCMD));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[20];
+ int len;
+
+ len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
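+/* The write expects a MAC address, e.g. "00:11:22:33:44:55". */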
+static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ bool ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? count : -EINVAL;
+}
+
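+/*
+ * Store the written value and re-send the PHY context command so that
+ * the new setting takes effect immediately.
+ */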
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ /* make sure the channel context is assigned */
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ mutex_unlock(&mvm->mutex);
+ return -EINVAL;
+ }
+
+ phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+ rcu_read_unlock();
+
+ mvm->dbgfs_rx_phyinfo = value;
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[8];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+ mvmvif->mvm->dbgfs_rx_phyinfo);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
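+/*
+ * Interface iterator callback: fail the pending write if another vif
+ * already has a minimum quota set (the caller clears its own first),
+ * since only one interface may have one at a time.
+ */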
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int *ret = data;
+
+ if (mvmvif->dbgfs_quota_min)
+ *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 95)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->dbgfs_quota_min = 0;
+ ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_dbgfs_quota_check, &ret);
+ if (ret == 0) {
+ mvmvif->dbgfs_quota_min = value;
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[10];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const char * const chanwidths[] = {
+ [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+ [NL80211_CHAN_WIDTH_20] = "ht20",
+ [NL80211_CHAN_WIDTH_40] = "ht40",
+ [NL80211_CHAN_WIDTH_80] = "vht80",
+ [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+ [NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
+
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[100];
+
+ /*
+ * Check if the debugfs directory already exists before creating it.
+ * This may happen when, for example, resetting hw or during suspend-resume.
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
+ return;
+
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+
+ if (!mvmvif->dbgfs_dir) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+ dbgfs_dir);
+ return;
+ }
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ vif->type == NL80211_IFTYPE_STATION)
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+ !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+ mvmvif->dbgfs_dir, 0600);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+ 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+ 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+ 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+ 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+ 0400);
+ }
+
+ /*
+ * Create a symlink for convenience, pointing to the interface-specific
+ * debugfs entries of the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+ * you will find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+ */
+ snprintf(buf, 100, "../../../%pd3/%pd",
+ dbgfs_dir,
+ mvmvif->dbgfs_dir);
+
+ mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+ mvm->debugfs_dir, buf);
+ if (!mvmvif->dbgfs_slink)
+ IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
+ dbgfs_dir);
+ return;
+err:
+ IWL_ERR(mvm, "Can't create debugfs entity\n");
+}
+
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ debugfs_remove(mvmvif->dbgfs_slink);
+ mvmvif->dbgfs_slink = NULL;
+
+ debugfs_remove_recursive(mvmvif->dbgfs_dir);
+ mvmvif->dbgfs_dir = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
new file mode 100644
index 000000000..05b774199
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,2067 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
+
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+#include "debugfs.h"
+#include "fw/error-dump.h"
+
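+/* Query the firmware for the current cTDP power budget. */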
+static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos, budget;
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+ mutex_unlock(&mvm->mutex);
+
+ if (budget < 0)
+ return budget;
+
+ pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ iwl_mvm_enter_ctkill(mvm);
+
+ return count;
+}
+
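+/*
+ * The written value is a station id on NICs with the new TX API, or a
+ * bitmask of TX queues to flush otherwise.
+ */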
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ u32 flush_arg;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ if (kstrtou32(buf, 0, &flush_arg))
+ return -EINVAL;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "FLUSHING all TID queues for sta_id = %d\n",
+ flush_arg);
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+ flush_arg);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
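+/* The write takes "<sta_id> <drain>", where drain is 0 or 1. */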
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_sta *mvmsta;
+ int sta_id, drain, ret;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+ return -EINVAL;
+ if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+ return -EINVAL;
+ if (drain < 0 || drain > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ if (!mvmsta)
+ ret = -ENOENT;
+ else
+ ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ unsigned int ofs, len;
+ size_t ret;
+ u8 *ptr;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EINVAL;
+
+ /* default is to dump the entire data segment */
+ img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (mvm->dbgfs_sram_len) {
+ ofs = mvm->dbgfs_sram_offset;
+ len = mvm->dbgfs_sram_len;
+ }
+
+ ptr = kzalloc(len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
+
+ kfree(ptr);
+
+ return ret;
+}
+
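+/*
+ * Writes of the form "<offset>,<len>" (hex, both DWORD aligned) select
+ * the SRAM window that subsequent reads will dump; anything else resets
+ * the window to the full data segment.
+ */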
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ const struct fw_img *img;
+ u32 offset, len;
+ u32 img_offset, img_len;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EINVAL;
+
+ img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+ img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ if ((offset & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ if (offset + len > img_offset + img_len)
+ return -EINVAL;
+
+ mvm->dbgfs_sram_offset = offset;
+ mvm->dbgfs_sram_len = len;
+ } else {
+ mvm->dbgfs_sram_offset = 0;
+ mvm->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos;
+
+ if (!mvm->temperature_test)
+ pos = scnprintf(buf , sizeof(buf), "disabled\n");
+ else
+ pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * Set NIC Temperature
+ * Causes the driver to ignore the actual NIC temperature reported by the FW.
+ * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN and
+ * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX
+ * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE
+ */
+static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int temperature;
+
+ if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test)
+ return -EIO;
+
+ if (kstrtoint(buf, 10, &temperature))
+ return -EINVAL;
+ /* not a legal temperature */
+ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
+ temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
+ temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
+ if (!mvm->temperature_test)
+ goto out;
+
+ mvm->temperature_test = false;
+ /* Since we can't read the temp while awake, just set
+ * it to zero until we get the next RX stats from the
+ * firmware.
+ */
+ mvm->temperature = 0;
+ } else {
+ mvm->temperature_test = true;
+ mvm->temperature = temperature;
+ }
+ IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
+ mvm->temperature_test ? "En" : "Dis" ,
+ mvm->temperature);
+ /* handle the temperature change */
+ iwl_mvm_tt_handler(mvm);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos, ret;
+ s32 temp;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_get_temp(mvm, &temp);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret)
+ return -EIO;
+
+ pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#ifdef CONFIG_ACPI
+static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[256];
+ int pos = 0;
+ int bufsz = sizeof(buf);
+ int tbl_idx;
+ u8 *value;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+ if (tbl_idx < 0) {
+ mutex_unlock(&mvm->mutex);
+ return tbl_idx;
+ }
+
+ if (!tbl_idx) {
+ pos = scnprintf(buf, bufsz,
+ "SAR geographic profile disabled\n");
+ } else {
+ value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Use geographic profile %d\n", tbl_idx);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n",
+ value[1], value[2], value[0]);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n",
+ value[4], value[5], value[3]);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+#endif
+
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+ char buf[400];
+ int i, pos = 0, bufsz = sizeof(buf);
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta)
+ pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+ else if (IS_ERR(sta))
+ pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+ PTR_ERR(sta));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+ static const size_t bufsz = 2048;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+ lq_sta->pers.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "A-MPDU size limit %d\n",
+ lq_sta->pers.dbg_agg_frame_count_lim);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "valid_tx_ant %s%s%s\n",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "last tx rate=0x%X ",
+ lq_sta->last_rate_n_flags);
+
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+ lq_sta->last_rate_n_flags);
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+ mvm->disable_power_off);
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+ mvm->disable_power_off_d3);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret, val;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (!strncmp("disable_power_off_d0=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off = val;
+ } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off_d3 = val;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_power_update_device(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
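+/* Pretty-print the BT coex mailbox contents, one dword at a time. */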
+static
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
+ int pos, int bufsz)
+{
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
+
+ BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
+ BT_MBOX_PRINT(0, LE_PROF1, false);
+ BT_MBOX_PRINT(0, LE_PROF2, false);
+ BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
+ BT_MBOX_PRINT(0, CHL_SEQ_N, false);
+ BT_MBOX_PRINT(0, INBAND_S, false);
+ BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
+ BT_MBOX_PRINT(0, LE_SCAN, false);
+ BT_MBOX_PRINT(0, LE_ADV, false);
+ BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
+ BT_MBOX_PRINT(0, OPEN_CON_1, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
+
+ BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
+ BT_MBOX_PRINT(1, IP_SR, false);
+ BT_MBOX_PRINT(1, LE_MSTR, false);
+ BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
+ BT_MBOX_PRINT(1, MSG_TYPE, false);
+ BT_MBOX_PRINT(1, SSN, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
+
+ BT_MBOX_PRINT(2, SNIFF_ACT, false);
+ BT_MBOX_PRINT(2, PAG, false);
+ BT_MBOX_PRINT(2, INQUIRY, false);
+ BT_MBOX_PRINT(2, CONN, false);
+ BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
+ BT_MBOX_PRINT(2, DISC, false);
+ BT_MBOX_PRINT(2, SCO_TX_ACT, false);
+ BT_MBOX_PRINT(2, SCO_RX_ACT, false);
+ BT_MBOX_PRINT(2, ESCO_RE_TX, false);
+ BT_MBOX_PRINT(2, SCO_DURATION, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
+
+ BT_MBOX_PRINT(3, SCO_STATE, false);
+ BT_MBOX_PRINT(3, SNIFF_STATE, false);
+ BT_MBOX_PRINT(3, A2DP_STATE, false);
+ BT_MBOX_PRINT(3, A2DP_SRC, false);
+ BT_MBOX_PRINT(3, ACL_STATE, false);
+ BT_MBOX_PRINT(3, MSTR_STATE, false);
+ BT_MBOX_PRINT(3, OBX_STATE, false);
+ BT_MBOX_PRINT(3, OPEN_CON_2, false);
+ BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
+ BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
+ BT_MBOX_PRINT(3, INBAND_P, false);
+ BT_MBOX_PRINT(3, MSG_TYPE_2, false);
+ BT_MBOX_PRINT(3, SSN_2, false);
+ BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
+
+ return pos;
+}
+
+static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+ char *buf;
+ int ret, pos = 0, bufsz = 1024;
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf + pos,
+ bufsz - pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+ notif->rrc_status & 0xF);
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+ notif->ttc_status & 0xF);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
+ IWL_MVM_BT_COEX_SYNC2SCO);
+ pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
+ IWL_MVM_BT_COEX_MPLUT);
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef BT_MBOX_PRINT
+
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+ char buf[256];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tPrimary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_primary_ci));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tSecondary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_secondary_ci));
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 bt_tx_prio;
+
+ if (sscanf(buf, "%u", &bt_tx_prio) != 1)
+ return -EINVAL;
+ if (bt_tx_prio > 4)
+ return -EINVAL;
+
+ mvm->bt_tx_prio = bt_tx_prio;
+
+ return count;
+}
+
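+/* Accepts one of "dis", "auto", "bt" or "wifi". */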
+static ssize_t
+iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ static const char * const modes_str[BT_FORCE_ANT_MAX] = {
+ [BT_FORCE_ANT_DIS] = "dis",
+ [BT_FORCE_ANT_AUTO] = "auto",
+ [BT_FORCE_ANT_BT] = "bt",
+ [BT_FORCE_ANT_WIFI] = "wifi",
+ };
+ int ret, bt_force_ant_mode;
+
+ for (bt_force_ant_mode = 0;
+ bt_force_ant_mode < ARRAY_SIZE(modes_str);
+ bt_force_ant_mode++) {
+ if (!strcmp(buf, modes_str[bt_force_ant_mode]))
+ break;
+ }
+
+ if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
+ return -EINVAL;
+
+ ret = 0;
+ mutex_lock(&mvm->mutex);
+ if (mvm->bt_force_ant_mode == bt_force_ant_mode)
+ goto out;
+
+ mvm->bt_force_ant_mode = bt_force_ant_mode;
+ IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
+ modes_str[mvm->bt_force_ant_mode]);
+
+ if (iwl_mvm_firmware_running(mvm))
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ else
+ ret = 0;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char *buff, *pos, *endpos;
+ static const size_t bufsz = 1024;
+ int ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
+ mvm->trans->cfg->fw_name_pre);
+ pos += scnprintf(pos, endpos - pos, "FW: %s\n",
+ mvm->fwrt.fw->human_readable);
+ pos += scnprintf(pos, endpos - pos, "Device: %s\n",
+ mvm->fwrt.trans->cfg->name);
+ pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
+ mvm->fwrt.dev->bus->name);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
+#define PRINT_STATS_LE32(_struct, _memb) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ fmt_table, #_memb, \
+ le32_to_cpu(_struct->_memb))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ static const char *fmt_table = "\t%-30s %10u\n";
+ static const char *fmt_header = "%-32s\n";
+ int pos = 0;
+ char *buf;
+ int ret;
+ size_t bufsz;
+
+ /* 43 = size of each data line; 33 = size of each header */
+ if (iwl_mvm_has_new_rx_stats_api(mvm))
+ bufsz = ((sizeof(struct mvm_statistics_rx) /
+ sizeof(__le32)) * 43) + (4 * 33) + 1;
+ else
+ bufsz = ((sizeof(struct mvm_statistics_rx_v3) /
+ sizeof(__le32)) * 43) + (4 * 33) + 1;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm;
+
+ PRINT_STATS_LE32(ofdm, ina_cnt);
+ PRINT_STATS_LE32(ofdm, fina_cnt);
+ PRINT_STATS_LE32(ofdm, plcp_err);
+ PRINT_STATS_LE32(ofdm, crc32_err);
+ PRINT_STATS_LE32(ofdm, overrun_err);
+ PRINT_STATS_LE32(ofdm, early_overrun_err);
+ PRINT_STATS_LE32(ofdm, crc32_good);
+ PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+ PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+ PRINT_STATS_LE32(ofdm, sfd_timeout);
+ PRINT_STATS_LE32(ofdm, fina_timeout);
+ PRINT_STATS_LE32(ofdm, unresponded_rts);
+ PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+ PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+ PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(ofdm, dsp_self_kill);
+ PRINT_STATS_LE32(ofdm, mh_format_err);
+ PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(ofdm, reserved);
+ } else {
+ struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm;
+
+ PRINT_STATS_LE32(ofdm, unresponded_rts);
+ PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(ofdm, dsp_self_kill);
+ PRINT_STATS_LE32(ofdm, reserved);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck;
+
+ PRINT_STATS_LE32(cck, ina_cnt);
+ PRINT_STATS_LE32(cck, fina_cnt);
+ PRINT_STATS_LE32(cck, plcp_err);
+ PRINT_STATS_LE32(cck, crc32_err);
+ PRINT_STATS_LE32(cck, overrun_err);
+ PRINT_STATS_LE32(cck, early_overrun_err);
+ PRINT_STATS_LE32(cck, crc32_good);
+ PRINT_STATS_LE32(cck, false_alarm_cnt);
+ PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+ PRINT_STATS_LE32(cck, sfd_timeout);
+ PRINT_STATS_LE32(cck, fina_timeout);
+ PRINT_STATS_LE32(cck, unresponded_rts);
+ PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(cck, sent_ack_cnt);
+ PRINT_STATS_LE32(cck, sent_cts_cnt);
+ PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(cck, dsp_self_kill);
+ PRINT_STATS_LE32(cck, mh_format_err);
+ PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(cck, reserved);
+ } else {
+ struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck;
+
+ PRINT_STATS_LE32(cck, unresponded_rts);
+ PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(cck, dsp_self_kill);
+ PRINT_STATS_LE32(cck, reserved);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_non_phy_v3 *general =
+ &mvm->rx_stats_v3.general;
+
+ PRINT_STATS_LE32(general, bogus_cts);
+ PRINT_STATS_LE32(general, bogus_ack);
+ PRINT_STATS_LE32(general, non_bssid_frames);
+ PRINT_STATS_LE32(general, filtered_frames);
+ PRINT_STATS_LE32(general, non_channel_beacons);
+ PRINT_STATS_LE32(general, channel_beacons);
+ PRINT_STATS_LE32(general, num_missed_bcon);
+ PRINT_STATS_LE32(general, adc_rx_saturation_time);
+ PRINT_STATS_LE32(general, ina_detection_search_time);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+ PRINT_STATS_LE32(general, interference_data_flag);
+ PRINT_STATS_LE32(general, channel_load);
+ PRINT_STATS_LE32(general, dsp_false_alarms);
+ PRINT_STATS_LE32(general, beacon_rssi_a);
+ PRINT_STATS_LE32(general, beacon_rssi_b);
+ PRINT_STATS_LE32(general, beacon_rssi_c);
+ PRINT_STATS_LE32(general, beacon_energy_a);
+ PRINT_STATS_LE32(general, beacon_energy_b);
+ PRINT_STATS_LE32(general, beacon_energy_c);
+ PRINT_STATS_LE32(general, num_bt_kills);
+ PRINT_STATS_LE32(general, mac_id);
+ PRINT_STATS_LE32(general, directed_data_mpdu);
+ } else {
+ struct mvm_statistics_rx_non_phy *general =
+ &mvm->rx_stats.general;
+
+ PRINT_STATS_LE32(general, bogus_cts);
+ PRINT_STATS_LE32(general, bogus_ack);
+ PRINT_STATS_LE32(general, non_channel_beacons);
+ PRINT_STATS_LE32(general, channel_beacons);
+ PRINT_STATS_LE32(general, num_missed_bcon);
+ PRINT_STATS_LE32(general, adc_rx_saturation_time);
+ PRINT_STATS_LE32(general, ina_detection_search_time);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+ PRINT_STATS_LE32(general, interference_data_flag);
+ PRINT_STATS_LE32(general, channel_load);
+ PRINT_STATS_LE32(general, beacon_rssi_a);
+ PRINT_STATS_LE32(general, beacon_rssi_b);
+ PRINT_STATS_LE32(general, beacon_rssi_c);
+ PRINT_STATS_LE32(general, beacon_energy_a);
+ PRINT_STATS_LE32(general, beacon_energy_b);
+ PRINT_STATS_LE32(general, beacon_energy_c);
+ PRINT_STATS_LE32(general, num_bt_kills);
+ PRINT_STATS_LE32(general, mac_id);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - HT");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_ht_phy_v1 *ht =
+ &mvm->rx_stats_v3.ofdm_ht;
+
+ PRINT_STATS_LE32(ht, plcp_err);
+ PRINT_STATS_LE32(ht, overrun_err);
+ PRINT_STATS_LE32(ht, early_overrun_err);
+ PRINT_STATS_LE32(ht, crc32_good);
+ PRINT_STATS_LE32(ht, crc32_err);
+ PRINT_STATS_LE32(ht, mh_format_err);
+ PRINT_STATS_LE32(ht, agg_crc32_good);
+ PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+ PRINT_STATS_LE32(ht, agg_cnt);
+ PRINT_STATS_LE32(ht, unsupport_mcs);
+ } else {
+ struct mvm_statistics_rx_ht_phy *ht =
+ &mvm->rx_stats.ofdm_ht;
+
+ PRINT_STATS_LE32(ht, mh_format_err);
+ PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+ PRINT_STATS_LE32(ht, agg_cnt);
+ PRINT_STATS_LE32(ht, unsupport_mcs);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef PRINT_STATS_LE32
+
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+ char __user *user_buf, size_t count,
+ loff_t *ppos,
+ struct iwl_mvm_frame_stats *stats)
+{
+ char *buff, *pos, *endpos;
+ int idx, i;
+ int ret;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_bh(&mvm->drv_stats_lock);
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos,
+ "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+ stats->legacy_frames,
+ stats->ht_frames,
+ stats->vht_frames);
+ pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+ stats->bw_20_frames,
+ stats->bw_40_frames,
+ stats->bw_80_frames);
+ pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+ stats->ngi_frames,
+ stats->sgi_frames);
+ pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+ stats->siso_frames,
+ stats->mimo2_frames);
+ pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+ stats->fail_frames,
+ stats->success_frames);
+ pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+ stats->agg_frames);
+ pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+ stats->ampdu_count);
+ pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+ stats->ampdu_count > 0 ?
+ (stats->agg_frames / stats->ampdu_count) : 0);
+
+ pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
+ idx = stats->last_frame_idx - 1;
+ for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+ idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+ if (stats->last_rates[idx] == 0)
+ continue;
+ pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+ (int)(ARRAY_SIZE(stats->last_rates) - i));
+ pos += rs_pretty_print_rate(pos, endpos - pos,
+ stats->last_rates[idx]);
+ }
+ spin_unlock_bh(&mvm->drv_stats_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
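+/*
+ * Illustrative note: the last_rates loop above walks the circular
+ * buffer starting right after last_frame_idx, i.e. oldest entry first,
+ * and skips slots that were never filled (rate 0).
+ */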
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+ &mvm->drv_rx_stats);
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int __maybe_unused ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+
+	/* allow the one extra restart that we're provoking here */
+ if (mvm->fw_restart >= 0)
+ mvm->fw_restart++;
+
+ /* take the return value to make compiler happy - it will fail anyway */
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
+ if (ret)
+ return ret;
+
+ iwl_force_nmi(mvm->trans);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
+
+ return count;
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ /* print which antennas were set for the scan command by the user */
+ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+ if (mvm->scan_rx_ant & ANT_A)
+ pos += scnprintf(buf + pos, bufsz - pos, "A");
+ if (mvm->scan_rx_ant & ANT_B)
+ pos += scnprintf(buf + pos, bufsz - pos, "B");
+ if (mvm->scan_rx_ant & ANT_C)
+ pos += scnprintf(buf + pos, bufsz - pos, "C");
+ pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 scan_rx_ant;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+ return -EINVAL;
+ if (scan_rx_ant > ANT_ABC)
+ return -EINVAL;
+ if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
+ return -EINVAL;
+
+ if (mvm->scan_rx_ant != scan_rx_ant) {
+ mvm->scan_rx_ant = scan_rx_ant;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+ }
+
+ return count;
+}
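+/*
+ * Illustrative usage only: the mask is parsed as hex with ANT_A/B/C as
+ * bits 0/1/2, so e.g.
+ *
+ *   echo 3 > scan_ant_rxchain   # scan with antennas A and B
+ *   cat scan_ant_rxchain        # "Antennas for scan: AB (3)"
+ *
+ * Bits outside iwl_mvm_get_valid_rx_ant() are rejected.
+ */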
+
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+	int ret, i, num_repeats, nbytes = count / 2;
+
+	/*
+	 * An empty pattern would divide by zero below; reject it, and
+	 * anything longer than the table itself, up front.
+	 */
+	if (!nbytes || nbytes > ARRAY_SIZE(cmd.indirection_table))
+		return -EINVAL;
+
+	ret = hex2bin(cmd.indirection_table, buf, nbytes);
+	if (ret)
+		return ret;
+
+	/*
+	 * The input is the indirection table, partial or full.
+	 * Repeat the pattern if needed.
+	 * For example, an input of 01020F is repeated 42 times,
+	 * directing RSS hash results to queues 1, 2 and 15 (skipping
+	 * queues 3 - 14).
+	 */
+ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+ for (i = 1; i < num_repeats; i++)
+ memcpy(&cmd.indirection_table[i * nbytes],
+ cmd.indirection_table, nbytes);
+	/* handle a pattern that is cut off mid-repeat in the last entries */
+ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+ ARRAY_SIZE(cmd.indirection_table) % nbytes);
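+	/*
+	 * Worked example (illustrative, assuming a 128-entry table):
+	 * for the input "01020F" above, nbytes = 3, so the pattern is
+	 * repeated 128 / 3 = 42 times and the remaining 128 % 3 = 2
+	 * entries get the start of the pattern (queues 1 and 2).
+	 */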
+
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0,
+ sizeof(cmd), &cmd);
+ else
+ ret = 0;
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rx_cmd_buffer rxb = {
+ ._rx_page_order = 0,
+ .truesize = 0, /* not used */
+ ._offset = 0,
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_mpdu_desc *desc;
+ int bin_len = count / 2;
+ int ret = -EINVAL;
+ size_t mpdu_cmd_hdr_size =
+ (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+	/* only the multi-queue (9000-series) RX descriptor is supported */
+ if (!mvm->trans->cfg->mq_rx_supported)
+ return -ENOTSUPP;
+
+ rxb._page = alloc_pages(GFP_ATOMIC, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
+ ret = hex2bin(page_address(rxb._page), buf, bin_len);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access */
+ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
+ goto out;
+
+	/* check that this is an RX packet */
+ if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+ WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+ goto out;
+
+ /* check the length in metadata matches actual received length */
+ desc = (void *)pkt->data;
+ if (le16_to_cpu(desc->mpdu_len) !=
+ (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
+ goto out;
+
+ local_bh_disable();
+ iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
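+/*
+ * Illustrative note (not from the driver): the input to inject_packet is
+ * a hex-encoded REPLY_RX_MPDU_CMD notification, i.e. an iwl_rx_packet
+ * header followed by an iwl_rx_mpdu_desc and the frame itself, with
+ * desc->mpdu_len matching the trailing frame length; anything shorter or
+ * carrying a different command ID is rejected before injection.
+ */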
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int conf;
+ char buf[8];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ conf = mvm->fwrt.dump.conf;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * Enable / disable continuous recording.
+ * Makes the FW start or stop continuous recording by sending the
+ * relevant host command.
+ * Enable: any integer larger than 0 sends ENABLE_CONT_RECORDING.
+ * Disable: an input of 0 sends DISABLE_CONT_RECORDING.
+ */
+static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_trans *trans = mvm->trans;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
+ struct iwl_continuous_record_cmd cont_rec = {};
+ int ret, rec_mode;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (!dest)
+ return -EOPNOTSUPP;
+
+ if (dest->monitor_mode != SMEM_MODE ||
+ trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ return -EOPNOTSUPP;
+
+ ret = kstrtoint(buf, 0, &rec_mode);
+ if (ret)
+ return ret;
+
+ cont_rec.record_mode.enable_recording = rec_mode ?
+ cpu_to_le16(ENABLE_CONT_RECORDING) :
+ cpu_to_le16(DISABLE_CONT_RECORDING);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0,
+ sizeof(cont_rec), &cont_rec);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
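+/*
+ * Illustrative usage only (a sketch, not part of the driver):
+ *
+ *   echo 1 > cont_recording   # sends ENABLE_CONT_RECORDING
+ *   echo 0 > cont_recording   # sends DISABLE_CONT_RECORDING
+ *
+ * Only supported with an SMEM debug destination on family 8000+.
+ */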
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int conf_id;
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = kstrtouint(buf, 0, &conf_id);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+ if (ret)
+ return ret;
+ if (count == 0)
+ return 0;
+
+ iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
+ (count - 1), NULL);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int max_amsdu_len;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &max_amsdu_len);
+ if (ret)
+ return ret;
+
+ if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
+ return -EINVAL;
+ mvm->max_amsdu_len = max_amsdu_len;
+
+ return count;
+}
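+/*
+ * Illustrative usage only, e.g. "echo 3839 > max_amsdu_len"; any value
+ * up to IEEE80211_MAX_MPDU_LEN_VHT_11454 (11454 bytes) is accepted.
+ */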
+
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ const struct iwl_fw_bcast_filter *filter;
+ char *buf;
+ int bufsz = 1024;
+ int i, j, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+ filter = &cmd.filters[i];
+
+ ADD_TEXT("Filter [%d]:\n", i);
+ ADD_TEXT("\tDiscard=%d\n", filter->discard);
+ ADD_TEXT("\tFrame Type: %s\n",
+ filter->frame_type ? "IPv4" : "Generic");
+
+ for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+ const struct iwl_fw_bcast_filter_attr *attr;
+
+ attr = &filter->attrs[j];
+ if (!attr->mask)
+ break;
+
+ ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+ j, attr->offset,
+ attr->offset_type ? "IP End" :
+ "Payload Start",
+ be32_to_cpu(attr->mask),
+ be32_to_cpu(attr->val),
+ le16_to_cpu(attr->reserved1));
+ }
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int pos, next_pos;
+ struct iwl_fw_bcast_filter filter = {};
+ struct iwl_bcast_filter_cmd cmd;
+ u32 filter_id, attr_id, mask, value;
+ int err = 0;
+
+ if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+ &filter.frame_type, &pos) != 3)
+ return -EINVAL;
+
+ if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+ filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+ return -EINVAL;
+
+ for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+ attr_id++) {
+ struct iwl_fw_bcast_filter_attr *attr =
+ &filter.attrs[attr_id];
+
+ if (pos >= count)
+ break;
+
+ if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+ &attr->offset, &attr->offset_type,
+ &mask, &value, &next_pos) != 4)
+ return -EINVAL;
+
+ attr->mask = cpu_to_be32(mask);
+ attr->val = cpu_to_be32(value);
+ if (mask)
+ filter.num_attrs++;
+
+ pos += next_pos;
+ }
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+ &filter, sizeof(filter));
+
+ /* send updated bcast filtering configuration */
+ if (iwl_mvm_firmware_running(mvm) &&
+ mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ char *buf;
+ int bufsz = 1024;
+ int i, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+ const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+ ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+ i, mac->default_discard, mac->attached_filters);
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_bcast_filter_cmd cmd;
+ struct iwl_fw_bcast_mac mac = {};
+ u32 mac_id, attached_filters;
+ int err = 0;
+
+ if (!mvm->bcast_filters)
+ return -ENOENT;
+
+ if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+ &attached_filters) != 3)
+ return -EINVAL;
+
+ if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+ mac.default_discard > 1 ||
+ attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+ return -EINVAL;
+
+ mac.attached_filters = cpu_to_le16(attached_filters);
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+ &mac, sizeof(mac));
+
+ /* send updated bcast filtering configuration */
+ if (iwl_mvm_firmware_running(mvm) &&
+ mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int store;
+
+ if (sscanf(buf, "%d", &store) != 1)
+ return -EINVAL;
+
+ mvm->store_d3_resume_sram = store;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ int ofs, len, pos = 0;
+	size_t bufsz;
+	ssize_t ret;
+ char *buf;
+ u8 *ptr = mvm->d3_resume_sram;
+
+ img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ bufsz = len * 4 + 256;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
+ mvm->store_d3_resume_sram ? "en" : "dis");
+
+ if (ptr) {
+ for (ofs = 0; ofs < len; ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x %16ph\n", ofs, ptr + ofs);
+ }
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "(no data captured)\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+
+ return ret;
+}
+#endif
+
+#define PRINT_MVM_REF(ref) do { \
+ if (mvm->refs[ref]) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t(0x%lx): %d %s\n", \
+ BIT(ref), mvm->refs[ref], #ref); \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int i, pos = 0;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+ u32 refs = 0;
+
+ for (i = 0; i < IWL_MVM_REF_COUNT; i++)
+ if (mvm->refs[i])
+ refs |= BIT(i);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n",
+ refs);
+
+ PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
+ PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ PRINT_MVM_REF(IWL_MVM_REF_USER);
+ PRINT_MVM_REF(IWL_MVM_REF_TX);
+ PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
+ PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
+ PRINT_MVM_REF(IWL_MVM_REF_START_AP);
+ PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
+ PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
+ PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
+ PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
+ PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
+ PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
+ PRINT_MVM_REF(IWL_MVM_REF_NMI);
+ PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
+ PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
+ PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
+ PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
+ PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
+ PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
+ PRINT_MVM_REF(IWL_MVM_REF_RX);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long value;
+ int ret;
+ bool taken;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ taken = mvm->refs[IWL_MVM_REF_USER];
+ if (value == 1 && !taken)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+ else if (value == 0 && taken)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
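+/*
+ * Illustrative usage only: "echo 1 > d0i3_refs" takes the
+ * IWL_MVM_REF_USER reference (keeping the device out of D0i3) and
+ * "echo 0 > d0i3_refs" releases it; repeating either write is -EINVAL.
+ */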
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, sta, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+ int ret;
+
+ if (!mvm->dbgfs_prph_reg_addr)
+ return -EINVAL;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+ mvm->dbgfs_prph_reg_addr,
+ iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 args;
+ u32 value;
+ int ret;
+
+ args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+ /* if we only want to set the reg address - nothing more to do */
+ if (args == 1)
+ goto out;
+
+ /* otherwise, make sure we have both address and value */
+ if (args != 2)
+ return -EINVAL;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+ if (ret)
+ return ret;
+
+ iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+out:
+ return count;
+}
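+/*
+ * Illustrative usage only (0xa03440 is an arbitrary example address):
+ *
+ *   echo 0xa03440 > prph_reg        # latch the periphery address
+ *   cat prph_reg                    # read the register back
+ *   echo "0xa03440 0x1" > prph_reg  # write 0x1 to it
+ */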
+
+static ssize_t
+iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t
+iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
+ unsigned int pos = 0;
+ size_t bufsz = sizeof(buf);
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ mvm->uapsd_noagg_bssids[i].addr);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
+MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
+MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
+MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+ (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
+
+MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
+#endif
+#ifdef CONFIG_ACPI
+MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
+#endif
+
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd cmd = {};
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ };
+ size_t delta;
+ ssize_t ret, len;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+ cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+ /* Take care of alignment of both the position and the length */
+ delta = *ppos & 0x3;
+ cmd.addr = cpu_to_le32(*ppos - delta);
+ cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+ (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
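+	/*
+	 * Worked example (illustrative): for *ppos = 6 and count = 10,
+	 * delta = 2, so the read starts at the aligned address 4 and
+	 * covers ALIGN(12, 4) / 4 = 3 dwords; the copy to userspace
+	 * below skips those first delta bytes again.
+	 */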
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ len = min((size_t)le32_to_cpu(rsp->len) << 2,
+ iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+ len = min(len - delta, count);
+ if (len < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd *cmd;
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {};
+ size_t cmd_size;
+ size_t data_size;
+ u32 op, len;
+ ssize_t ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+
+ if (*ppos & 0x3 || count < 4) {
+ op = DEBUG_MEM_OP_WRITE_BYTES;
+ len = min(count, (size_t)(4 - (*ppos & 0x3)));
+ data_size = len;
+ } else {
+ op = DEBUG_MEM_OP_WRITE;
+ len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+ data_size = len << 2;
+ }
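+	/*
+	 * Worked example (illustrative): for *ppos = 5 and count = 7,
+	 * the byte-wise path is taken and len = min(7, 4 - 1) = 3, so
+	 * only the bytes up to the next dword boundary are written;
+	 * userspace is expected to retry with the remainder.
+	 */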
+
+ cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->op = cpu_to_le32(op);
+ cmd->len = cpu_to_le32(len);
+ cmd->addr = cpu_to_le32(*ppos);
+ if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+
+	hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+ hcmd.data[0] = (void *)cmd;
+ hcmd.len[0] = cmd_size;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ kfree(cmd);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = data_size;
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+ .read = iwl_dbgfs_mem_read,
+ .write = iwl_dbgfs_mem_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_has_tlc_offload(mvm))
+ MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
+
+ return;
+err:
+ IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+{
+ struct dentry *bcast_dir __maybe_unused;
+ char buf[100];
+
+ spin_lock_init(&mvm->drv_stats_lock);
+
+ mvm->debugfs_dir = dbgfs_dir;
+
+ MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
+#ifdef CONFIG_ACPI
+ MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
+#endif
+
+ if (!debugfs_create_bool("enable_scan_iteration_notif",
+ 0600,
+ mvm->debugfs_dir,
+ &mvm->scan_iter_notif_enabled))
+ goto err;
+ if (!debugfs_create_bool("drop_bcn_ap_mode", 0600,
+ mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
+ goto err;
+
+	MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+ bcast_dir = debugfs_create_dir("bcast_filtering",
+ mvm->debugfs_dir);
+ if (!bcast_dir)
+ goto err;
+
+ if (!debugfs_create_bool("override", 0600,
+ bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override))
+ goto err;
+
+ MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+ bcast_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+ bcast_dir, 0600);
+ }
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+ MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
+ if (!debugfs_create_bool("d3_wake_sysassert", 0600,
+ mvm->debugfs_dir, &mvm->d3_wake_sysassert))
+ goto err;
+ if (!debugfs_create_u32("last_netdetect_scans", 0400,
+ mvm->debugfs_dir, &mvm->last_netdetect_scans))
+ goto err;
+#endif
+
+ if (!debugfs_create_u8("ps_disabled", 0400,
+ mvm->debugfs_dir, &mvm->ps_disabled))
+ goto err;
+ if (!debugfs_create_blob("nvm_hw", 0400,
+ mvm->debugfs_dir, &mvm->nvm_hw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_sw", 0400,
+ mvm->debugfs_dir, &mvm->nvm_sw_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_calib", 0400,
+ mvm->debugfs_dir, &mvm->nvm_calib_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_prod", 0400,
+ mvm->debugfs_dir, &mvm->nvm_prod_blob))
+ goto err;
+ if (!debugfs_create_blob("nvm_phy_sku", 0400,
+ mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
+ goto err;
+
+ debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
+
+	/*
+	 * Create a symlink with mac80211. It will be removed when mac80211
+	 * exits (before the opmode exits, which removes the target).
+	 */
+ snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
+ if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
+ goto err;
+
+ return 0;
+err:
+ IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000..ede6ef8d3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,103 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+}
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
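+/*
+ * Illustrative expansion: MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64)
+ * in debugfs.c generates _iwl_dbgfs_prph_reg_write() - a wrapper copying
+ * at most 63 bytes from userspace into a NUL-terminated stack buffer
+ * before calling iwl_dbgfs_prph_reg_write() - plus the matching
+ * iwl_dbgfs_prph_reg_ops file_operations.
+ */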
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
new file mode 100644
index 000000000..e8e74dd55
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,100 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw/api/tdls.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/offload.h"
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/api/alive.h"
+#include "fw/api/binding.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/coex.h"
+#include "fw/api/commands.h"
+#include "fw/api/d3.h"
+#include "fw/api/filter.h"
+#include "fw/api/led.h"
+#include "fw/api/mac.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/power.h"
+#include "fw/api/rs.h"
+#include "fw/api/rx.h"
+#include "fw/api/scan.h"
+#include "fw/api/sf.h"
+#include "fw/api/sta.h"
+#include "fw/api/stats.h"
+#include "fw/api/tof.h"
+#include "fw/api/tx.h"
+
+#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
new file mode 100644
index 000000000..c7e2b88cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -0,0 +1,1306 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+
+#include "mvm.h"
+#include "fw/dbg.h"
+#include "iwl-phy-db.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+#define MVM_UCODE_ALIVE_TIMEOUT HZ
+#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+
+struct iwl_mvm_alive_data {
+ bool valid;
+ u32 scd_base_addr;
+};
+
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
+ sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+
+ if (mvm->trans->num_rx_queues == 1)
+ return 0;
+
+	/* Do not direct RSS traffic to queue 0, which is our fallback queue */
+ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+ cmd.indirection_table[i] =
+ 1 + (i % (mvm->trans->num_rx_queues - 1));
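+	/*
+	 * Illustrative: with num_rx_queues = 4, the table cycles through
+	 * queues 1, 2, 3, 1, 2, 3, ... so hash results never land on the
+	 * fallback queue.
+	 */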
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+ int i, num_queues, size, ret;
+ struct iwl_rfh_queue_config *cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ /* Do not configure default queue, it is configured via context info */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = struct_size(cmd, data, num_queues);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = size;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_dqa_enable_cmd dqa_cmd = {
+ .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
+ };
+ u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
+ else
+ IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
+
+ return ret;
+}
+
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+ __le32 *dump_data = mfu_dump_notif->data;
+ int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+ int i;
+
+ if (mfu_dump_notif->index_num == 0)
+ IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ le32_to_cpu(mfu_dump_notif->assert_id));
+
+ for (i = 0; i < n_words; i++)
+ IWL_DEBUG_INFO(mvm,
+ "MFUART assert dump, dword %u: 0x%08x\n",
+ le16_to_cpu(mfu_dump_notif->index_num) *
+ n_words + i,
+ le32_to_cpu(dump_data[i]));
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_alive_data *alive_data = data;
+ struct mvm_alive_resp_v3 *palive3;
+ struct mvm_alive_resp *palive;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u16 status;
+ u32 umac_error_event_table;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+ } else {
+ palive3 = (void *)pkt->data;
+ umac = &palive3->umac_data;
+ lmac1 = &palive3->lmac_data;
+ status = le16_to_cpu(palive3->status);
+ }
+
+ mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+ if (lmac2)
+ mvm->error_event_table[1] =
+ le32_to_cpu(lmac2->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+
+ umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+
+ if (!umac_error_event_table) {
+ mvm->support_umac_log = false;
+ } else if (umac_error_event_table >=
+ mvm->trans->cfg->min_umac_error_event_table) {
+ mvm->support_umac_log = true;
+ mvm->umac_error_event_table = umac_error_event_table;
+ } else {
+		IWL_ERR(mvm,
+			"Invalid error log pointer 0x%08X for %s uCode\n",
+			umac_error_event_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ mvm->support_umac_log = false;
+ }
+
+ alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+
+ IWL_DEBUG_FW(mvm,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
+
+ if (lmac2)
+ IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
+
+ return true;
+}
+
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+
+ return true;
+}
+
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_phy_db *phy_db = data;
+
+ if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+ return true;
+ }
+
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
+
+ return false;
+}
+
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ enum iwl_ucode_type ucode_type)
+{
+ struct iwl_notification_wait alive_wait;
+ struct iwl_mvm_alive_data alive_data;
+ const struct fw_img *fw;
+ int ret, i;
+ enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
+ static const u16 alive_cmd[] = { MVM_ALIVE };
+
+ if (ucode_type == IWL_UCODE_REGULAR &&
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+ !(fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
+ fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
+ else
+ fw = iwl_get_ucode_image(mvm->fw, ucode_type);
+ if (WARN_ON(!fw))
+ return -EINVAL;
+ iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_data);
+
+ ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+ if (ret) {
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the ALIVE notification here.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret) {
+ struct iwl_trans *trans = mvm->trans;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+ else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, SB_CPU_1_STATUS),
+ iwl_read_prph(trans, SB_CPU_2_STATUS));
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return ret;
+ }
+
+ if (!alive_data.valid) {
+ IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return -EIO;
+ }
+
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
+ /*
+ * Note: all the queues are enabled as part of the interface
+ * initialization, but in firmware restart scenarios they
+ * could be stopped, so wake them up. In firmware restart,
+ * mac80211 will have the queues stopped as well until the
+ * reconfiguration completes. During normal startup, they
+ * will be empty.
+ */
+
+ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+ atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
+
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ return 0;
+}
+
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+ };
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* Send init config command to mark that we are sending NVM access
+ * commands
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+ INIT_EXTENDED_CFG_CMD), 0,
+ sizeof(init_cfg), &init_cfg);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run init config command: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Load NVM to NIC if needed */
+ if (mvm->nvm_file_name) {
+ iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
+ iwl_mvm_load_nvm_to_nic(mvm);
+ }
+
+ if (IWL_MVM_PARSE_NVM && read_nvm) {
+ ret = iwl_nvm_init(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (!IWL_MVM_PARSE_NVM && read_nvm) {
+ mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
+ if (IS_ERR(mvm->nvm_data)) {
+ ret = PTR_ERR(mvm->nvm_data);
+ mvm->nvm_data = NULL;
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+
+	/* set extra PHY configuration flags from the device's cfg */
+ phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
+
+ phy_cfg_cmd.calib_control.event_trigger =
+ mvm->fw->default_calib[ucode_type].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ mvm->fw->default_calib[ucode_type].flow_trigger;
+
+ IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+ phy_cfg_cmd.phy_cfg);
+
+ return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
+ sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ CALIB_RES_NOTIF_PHY_DB
+ };
+ int ret;
+
+ if (iwl_mvm_has_unified_ucode(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, true);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(mvm->calibrating))
+ return 0;
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &calib_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_phy_db_entry,
+ mvm->phy_db);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+ goto remove_notif;
+ }
+
+ if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ if (ret)
+ goto remove_notif;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ ret = iwl_nvm_init(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto remove_notif;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
+ "Too old NVM version (0x%0x, required = 0x%0x)",
+ mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
+
+	/*
+	 * Abort after reading the NVM in case RF Kill is on; we will
+	 * complete the init sequence later, when RF kill switches off.
+	 */
+ if (iwl_mvm_is_radio_hw_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm,
+ "jump over all phy activities due to RF kill\n");
+ goto remove_notif;
+ }
+
+ mvm->calibrating = true;
+
+ /* Send TX valid antennas before triggering calibrations */
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto remove_notif;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ goto remove_notif;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the calibration complete notification.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+ MVM_UCODE_CALIB_TIMEOUT);
+ if (!ret)
+ goto out;
+
+ if (iwl_mvm_is_radio_hw_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+ ret = 0;
+ } else {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ }
+
+ goto out;
+
+remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+ mvm->calibrating = false;
+ if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
+		/* we want to debug INIT and we have no NVM - fake one */
+ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+ sizeof(struct ieee80211_channel) +
+ sizeof(struct ieee80211_rate),
+ GFP_KERNEL);
+ if (!mvm->nvm_data)
+ return -ENOMEM;
+ mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+ mvm->nvm_data->bands[0].n_channels = 1;
+ mvm->nvm_data->bands[0].n_bitrates = 1;
+ mvm->nvm_data->bands[0].bitrates =
+ (void *)mvm->nvm_data->channels + 1;
+ mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
+{
+ struct iwl_ltr_config_cmd cmd = {
+ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+ };
+
+ if (!mvm->trans->ltr_enabled)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+ sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_ACPI
+static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+ union acpi_object *table,
+ struct iwl_mvm_sar_profile *profile,
+ bool enabled)
+{
+ int i;
+
+ profile->enabled = enabled;
+
+ for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
+ if ((table[i].type != ACPI_TYPE_INTEGER) ||
+ (table[i].integer.value > U8_MAX))
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+ union acpi_object *wifi_pkg, *table, *data;
+ bool enabled;
+ int ret;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
+ enabled);
+out_free:
+ kfree(data);
+ return ret;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+	int i, n_profiles, pos, ret;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+	 * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+	/* the tables start at element 3 */
+	pos = 3;
+
+	for (i = 0; i < n_profiles; i++) {
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_mvm_sar_set_profile(mvm,
+ &wifi_pkg->package.elements[pos],
+ &mvm->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += ACPI_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
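+
+/*
+ * Illustrative EWRD package layout, as parsed above: elements[1] is the
+ * "enabled" flag, elements[2] is n_profiles, and the per-profile tables
+ * follow back to back from elements[3], each ACPI_SAR_TABLE_SIZE
+ * integers long.  E.g. with n_profiles = 2, profile 2 (stored in
+ * sar_profiles[1]) is read from element 3, and profile 3
+ * (sar_profiles[2]) from element 3 + ACPI_SAR_TABLE_SIZE.
+ */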
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+ union acpi_object *wifi_pkg, *data;
+ int i, j, ret;
+ int idx = 1;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_WGDS_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[idx++];
+ if ((entry->type != ACPI_TYPE_INTEGER) ||
+ (entry->integer.value > U8_MAX)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ mvm->geo_profiles[i].values[j] = entry->integer.value;
+ }
+ }
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+ struct iwl_dev_tx_power_cmd cmd = {
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ int i, j, idx;
+ int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+ int len = sizeof(cmd);
+
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+ ACPI_SAR_TABLE_SIZE);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v3);
+
+ for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_mvm_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &mvm->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+
+ IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+ idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
+ cmd.v3.per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
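+
+/*
+ * Worked example for the index math above (illustrative values): the
+ * flat BIOS table is chain-major, so chain i / sub-band j is read from
+ * prof->table[i * ACPI_SAR_NUM_SUB_BANDS + j].  Since the values are
+ * in 1/8 dBm units, a table entry of 16 corresponds to a
+ * 16 * .125 = 2 dBm limit for that chain/sub-band.
+ */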
+
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+ ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+{
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ int ret;
+
+ struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
+ .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ .len = { sizeof(geo_cmd), },
+ .flags = CMD_WANT_SKB,
+ .data = { &geo_cmd },
+ };
+
+ if (!iwl_mvm_sar_geo_support(mvm))
+ return -EOPNOTSUPP;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+ return ret;
+ }
+
+ resp = (void *)cmd.resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
+		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
+		ret = -EIO;
+ }
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ struct iwl_geo_tx_power_profiles_cmd cmd = {
+ .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
+ };
+ int ret, i, j;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+ if (!iwl_mvm_sar_geo_support(mvm))
+ return 0;
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ return 0;
+ }
+
+ IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&cmd.table[i];
+
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ value = &mvm->geo_profiles[i].values[j *
+ ACPI_GEO_PER_CHAIN_SIZE];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(mvm,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
+ }
+ }
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+}
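+
+/*
+ * Illustrative WGDS layout, as consumed above: each geo profile holds
+ * ACPI_WGDS_NUM_BANDS groups of ACPI_GEO_PER_CHAIN_SIZE bytes, where
+ * value[0] is the max TX power and value[1]/value[2] are the chain A
+ * and chain B offsets for that band, matching the debug print above.
+ */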
+
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
+ int prof_b)
+{
+ return -ENOENT;
+}
+
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ ret = iwl_mvm_sar_get_wrds_table(mvm);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /*
+ * If not available, don't fail and don't bother with EWRD.
+ * Return 1 to tell that we can't use WGDS either.
+ */
+ return 1;
+ }
+
+ ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ /* if EWRD is not available, we can still use WRDS, so don't fail */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* choose profile 1 (WRDS) as default for both chains */
+ ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+ /*
+ * If we don't have profile 0 from BIOS, just skip it. This
+ * means that SAR Geo will not be enabled either, even if we
+ * have other valid profiles.
+ */
+ if (ret == -ENOENT)
+ return 1;
+
+ return ret;
+}
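+
+/*
+ * Return convention for iwl_mvm_sar_init(): 0 means WRDS was loaded and
+ * profile 1 selected for both chains, a negative value is a hard error,
+ * and 1 means basic SAR is unavailable - the caller then only checks
+ * that WGDS is absent as well, since geo SAR cannot be used without
+ * basic SAR.
+ */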
+
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ if (iwl_mvm_has_unified_ucode(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, false);
+
+ ret = iwl_run_init_mvm_ucode(mvm, false);
+
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+ return ret;
+ }
+
+ /*
+	 * Stop and start the transport without entering low power
+	 * mode. This will preserve the state of other components on
+	 * the device that were initialized by the INIT firmware
+	 * (MFUART).
+ */
+ _iwl_trans_stop_device(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
+ return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_rt_fw(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ iwl_get_shared_mem_conf(&mvm->fwrt);
+
+ ret = iwl_mvm_sf_update(mvm, NULL, false);
+ if (ret)
+ IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ /* if we have a destination, assume EARLY START */
+ if (mvm->fw->dbg_dest_tlv)
+ mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+ iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
+
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto error;
+
+ if (!iwl_mvm_has_unified_ucode(mvm)) {
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ if (ret)
+ goto error;
+
+ /* Init RSS configuration */
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ ret = iwl_configure_rxq(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ ret = iwl_send_rss_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+
+ /* reset quota debouncing buffer - 0xff will yield invalid data */
+ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
+
+ ret = iwl_mvm_send_dqa_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ /* Add all the PHY contexts */
+ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten in the other flows.
+ * For now use the first channel we have.
+ */
+ ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
+ &chandef, 1, 1);
+ if (ret)
+ goto error;
+ }
+
+#ifdef CONFIG_THERMAL
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+		/* In order to hand the responsibility for CT-kill and
+		 * TX backoff over to the firmware, we need to send an
+		 * empty temperature reporting cmd at init time.
+		 */
+ iwl_mvm_send_temp_report_ths_cmd(mvm);
+ } else {
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+ }
+
+ /* TODO: read the budget from BIOS / Platform NVM */
+
+ /*
+	 * If there is no budget from BIOS / Platform NVM, the default
+	 * budget is 2000mW (cooling state 0).
+ */
+ if (iwl_mvm_is_ctdp_supported(mvm)) {
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
+#else
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+#endif
+
+ WARN_ON(iwl_mvm_config_ltr(mvm));
+
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ goto error;
+
+ /*
+ * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+ * anyway, so don't init MCC.
+ */
+ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+ ret = iwl_mvm_init_mcc(mvm);
+ if (ret)
+ goto error;
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
+ mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
+ ret = iwl_mvm_config_scan(mvm);
+ if (ret)
+ goto error;
+ }
+
+ /* allow FW/transport low power modes if not during restart */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ ret = iwl_mvm_sar_init(mvm);
+ if (ret == 0) {
+ ret = iwl_mvm_sar_geo_init(mvm);
+ } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+ /*
+ * If basic SAR is not available, we check for WGDS,
+ * which should *not* be available either. If it is
+ * available, issue an error, because we can't use SAR
+ * Geo without basic SAR.
+ */
+ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
+ }
+
+ if (ret < 0)
+ goto error;
+
+ iwl_mvm_leds_sync(mvm);
+
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ return 0;
+ error:
+ if (!iwlmvm_mod_params.init_dbg || !ret)
+ iwl_mvm_stop_device(mvm);
+ return ret;
+}
+
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration*/
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+ /* Add auxiliary station for scanning */
+ ret = iwl_mvm_add_aux_sta(mvm);
+ if (ret)
+ goto error;
+
+ return 0;
+ error:
+ iwl_mvm_stop_device(mvm);
+ return ret;
+}
+
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+ u32 flags = le32_to_cpu(card_state_notif->flags);
+
+ IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_KILL_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+}
+
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver),
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration));
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->image_size));
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
new file mode 100644
index 000000000..072f80c90
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -0,0 +1,178 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
+{
+ struct iwl_led_cmd led_cmd = {
+ .status = cpu_to_le32(on),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(LONG_GROUP, LEDS_CMD),
+ .len = { sizeof(led_cmd), },
+ .data = { &led_cmd, },
+ .flags = CMD_ASYNC,
+ };
+ int err;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return;
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (err)
+ IWL_WARN(mvm, "LED command failed: %d\n", err);
+}
+
+static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
+ iwl_mvm_send_led_fw_cmd(mvm, on);
+ return;
+ }
+
+ iwl_write32(mvm->trans, CSR_LED_REG,
+ on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+
+ iwl_mvm_led_set(mvm, brightness > 0);
+}
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_BLINK:
+		IWL_ERR(mvm, "Blink led mode not supported, using default\n");
+		/* fall through */
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mvm, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
+ mvm->led.brightness_set = iwl_led_brightness_set;
+ mvm->led.max_brightness = 1;
+
+ if (mode == IWL_LED_RF_STATE)
+ mvm->led.default_trigger =
+ ieee80211_get_radio_led_name(mvm->hw);
+
+ ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+ if (ret) {
+ kfree(mvm->led.name);
+ IWL_INFO(mvm, "Failed to enable led\n");
+ return ret;
+ }
+
+ mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+ return 0;
+}
+
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+ return;
+
+ /*
+ * if we control through the register, we're doing it
+ * even when the firmware isn't up, so no need to sync
+ */
+ if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
+}
+
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+ return;
+
+ led_classdev_unregister(&mvm->led);
+ kfree(mvm->led.name);
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 000000000..d90d58309
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,1618 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+#include "time-event.h"
+
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+ IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_BK,
+};
+
+const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
+ IWL_GEN2_EDCA_TX_FIFO_VO,
+ IWL_GEN2_EDCA_TX_FIFO_VI,
+ IWL_GEN2_EDCA_TX_FIFO_BE,
+ IWL_GEN2_EDCA_TX_FIFO_BK,
+ IWL_GEN2_TRIG_TX_FIFO_VO,
+ IWL_GEN2_TRIG_TX_FIFO_VI,
+ IWL_GEN2_TRIG_TX_FIFO_BE,
+ IWL_GEN2_TRIG_TX_FIFO_BK,
+};
+
+struct iwl_mvm_mac_iface_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+ unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+ enum iwl_tsf_id preferred_tsf;
+ bool found_vif;
+};
+
+struct iwl_mvm_hw_queues_iface_iterator_data {
+ struct ieee80211_vif *exclude_vif;
+ unsigned long used_hw_queues;
+};
+
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
+
+ /* Skip the interface for which we are trying to assign a tsf_id */
+ if (vif == data->vif)
+ return;
+
+ /*
+ * The TSF is a hardware/firmware resource, there are 4 and
+ * the driver should assign and free them as needed. However,
+ * there are cases where 2 MACs should share the same TSF ID
+ * for the purpose of clock sync, an optimization to avoid
+ * clock drift causing overlapping TBTTs/DTIMs for a GO and
+ * client in the system.
+ *
+ * The firmware will decide according to the MAC type which
+ * will be the master and slave. Clients that need to sync
+ * with a remote station will be the master, and an AP or GO
+ * will be the slave.
+ *
+ * Depending on the new interface type it can be slaved to
+ * or become the master of an existing interface.
+ */
+ switch (data->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
+ */
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+
+ case NL80211_IFTYPE_AP:
+ /*
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
+ */
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ default:
+ /*
+ * For all other interface types there's no need to
+ * take drift into account. Either they're exclusive
+ * like IBSS and monitor, or we don't care much about
+ * their TSF (like P2P Device), but we won't be able
+ * to share the TSF resource.
+ */
+ break;
+ }
+
+ /*
+ * Unless we exited above, we can't share the TSF resource
+ * that the virtual interface we're iterating over is using
+ * with the new one, so clear the available bit and if this
+ * was the preferred one, reset that as well.
+ */
+ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+ if (data->preferred_tsf == mvmvif->tsf_id)
+ data->preferred_tsf = NUM_TSF_IDS;
+}
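+
+/*
+ * Worked example for the beacon-interval check above (illustrative
+ * values): adding a client with BI = 100 TU while iterating over an AP
+ * with BI = 300 TU gives min_bi = 100 and (100 - 300) % 100 == 0, so
+ * the AP's TSF becomes the preferred one and is shared.  With 100 vs.
+ * 250 TU, (100 - 250) % 100 != 0, the intervals aren't multiples of
+ * each other, and the TSF is not shared.
+ */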
+
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
+{
+ u32 qmask = 0, ac;
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ qmask |= BIT(vif->hw_queue[ac]);
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ qmask |= BIT(vif->cab_queue);
+
+ return qmask;
+}
+
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+ /* exclude the given vif */
+ if (vif == data->exclude_vif)
+ return;
+
+ data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data data = {
+ .exclude_vif = exclude_vif,
+ .used_hw_queues =
+ BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+ BIT(mvm->aux_queue) |
+ BIT(IWL_MVM_DQA_GCAST_QUEUE),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* mark all VIF used hw queues */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_iface_hw_queues_iter, &data);
+
+ return data.used_hw_queues;
+}
+
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /* find a suitable tsf_id */
+ iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_tsf_id_iter, &data);
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+}
+
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ .found_vif = false,
+ };
+ u32 ac;
+ int ret, i, queue_limit;
+ unsigned long used_hw_queues;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Allocate a MAC ID and a TSF for this MAC, along with the queues
+ * and other resources.
+ */
+
+ /*
+ * Before the iterator, we start with all MAC IDs and TSFs available.
+ *
+ * During iteration, all MAC IDs are cleared that are in use by other
+ * virtual interfaces, and all TSF IDs are cleared that can't be used
+ * by this new virtual interface because they're used by an interface
+ * that can't share it with the new one.
+ * At the same time, we check if there's a preferred TSF in the case
+ * that we should share it with another interface.
+ */
+
+ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ break;
+ /* fall through */
+ default:
+ __clear_bit(0, data.available_mac_ids);
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_iface_iterator, &data);
+
+ used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
+ /*
+ * In the case we're getting here during resume, it's similar to
+ * firmware restart, and with RESUME_ALL the iterator will find
+ * the vif being added already.
+ * We don't want to reassign any IDs in either case since doing
+ * so would probably assign different IDs (as interfaces aren't
+ * necessarily added in the same order), but the old IDs were
+ * preserved anyway, so skip ID assignment for both resume and
+ * recovery.
+ */
+ if (data.found_vif)
+ return 0;
+
+ /* Therefore, in recovery, we can't get here */
+ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return -EBUSY;
+
+ mvmvif->id = find_first_bit(data.available_mac_ids,
+ NUM_MAC_INDEX_DRIVER);
+ if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+ if (mvmvif->tsf_id == NUM_TSF_IDS) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ mvmvif->color = 0;
+
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+
+	/* No need to allocate data queues to the P2P Device MAC. */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+ return 0;
+ }
+
+ /*
+	 * Queues in mac80211 are almost entirely independent of
+	 * the ones here - there is no real limit.
+ */
+ queue_limit = IEEE80211_MAX_QUEUES;
+ BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
+ BITS_PER_BYTE *
+ sizeof(mvm->hw_queue_to_mac80211[0]));
+
+ /*
+ * Find available queues, and allocate them to the ACs. When in
+ * DQA-mode they aren't really used, and this is done only so the
+ * mac80211 ieee80211_check_queues() function won't fail
+ */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit);
+
+ if (queue >= queue_limit) {
+ IWL_ERR(mvm, "Failed to allocate queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ __set_bit(queue, &used_hw_queues);
+ vif->hw_queue[ac] = queue;
+ }
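+
+	/*
+	 * Illustrative allocation for the loop above: with bits 0-3 and
+	 * 8 already set in used_hw_queues (e.g. offchannel/aux/GCAST
+	 * plus another vif's queues), the four ACs are handed queues 4,
+	 * 5, 6 and 7 in order, each marked used as it is assigned.
+	 */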
+
+ /* Allocate the CAB queue for softAP and GO interfaces */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * For TVQM this will be overwritten later with the FW assigned
+ * queue value (when queue is enabled).
+ */
+ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ } else {
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ }
+
+ mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+ mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
+ return 0;
+
+exit_fail:
+ memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+ memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+ return ret;
+}
+
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band,
+ u8 *cck_rates, u8 *ofdm_rates)
+{
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u8 cck = 0;
+ u8 ofdm = 0;
+ int i;
+
+ sband = mvm->hw->wiphy->bands[band];
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ int hw = sband->bitrates[i].hw_value;
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
+ if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWL_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
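+
+/*
+ * Worked example (illustrative): if the only basic rates are 5.5 and
+ * 11 Mbps, the loop above yields cck = BIT(5.5M) | BIT(11M), and the
+ * mandatory-rate fixups then add 2M (below the lowest basic CCK rate)
+ * and 1M (always), so ACKs may be sent at {1, 2, 5.5, 11} Mbps.  On
+ * the OFDM side, 6M is always added, while 12M and 24M are added only
+ * when they are below the lowest basic OFDM rate.
+ */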
+
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+	/* for both sta and ap, ht_operation_mode holds the protection_mode */
+ u8 protection_mode = vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+	/* The fw does not distinguish between HT and FAT (40 MHz) */
+ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+
+ IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+ /*
+	 * See section 9.23.3.1 of IEEE 802.11-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ cmd->protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ default:
+ IWL_ERR(mvm, "Illegal protection mode %d\n",
+ protection_mode);
+ break;
+ }
+}
+
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ const u8 *bssid_override,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chanctx;
+ bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION);
+ u8 cck_ack_rates, ofdm_ack_rates;
+ const u8 *bssid = bssid_override ?: vif->bss_conf.bssid;
+ int i;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(action);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+ else
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+ break;
+ case NL80211_IFTYPE_AP:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+ memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+
+ if (bssid)
+ memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->bssid_addr);
+
+ rcu_read_lock();
+ chanctx = rcu_dereference(vif->chanctx_conf);
+ iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
+ : NL80211_BAND_2GHZ,
+ &cck_ack_rates, &ofdm_ack_rates);
+ rcu_read_unlock();
+
+ cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
+ cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+
+ cmd->cck_short_preamble =
+ cpu_to_le32(vif->bss_conf.use_short_preamble ?
+ MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot =
+ cpu_to_le32(vif->bss_conf.use_short_slot ?
+ MAC_FLG_SHORT_SLOT : 0);
+
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
+
+ cmd->ac[txf].cw_min =
+ cpu_to_le16(mvmvif->queue_params[i].cw_min);
+ cmd->ac[txf].cw_max =
+ cpu_to_le16(mvmvif->queue_params[i].cw_max);
+ cmd->ac[txf].edca_txop =
+ cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+ cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[txf].fifos_mask = BIT(txf);
+ }
+
+ if (vif->bss_conf.qos)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (vif->bss_conf.use_cts_prot)
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ vif->bss_conf.use_cts_prot,
+ vif->bss_conf.ht_operation_mode);
+ if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+ if (ht_enabled)
+ iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
+}
+
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
+ sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
+ le32_to_cpu(cmd->action), ret);
+ return ret;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off,
+ const u8 *bssid_override)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mac_data_sta *ctxt_sta;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+
+ if (vif->p2p) {
+ struct ieee80211_p2p_noa_attr *noa =
+ &vif->bss_conf.p2p_noa_attr;
+
+ cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+ ctxt_sta = &cmd.p2p_sta.sta;
+ } else {
+ ctxt_sta = &cmd.sta;
+ }
+
+ /* We need the dtim_period to set the MAC as associated */
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+ !force_assoc_off) {
+ u32 dtim_offs;
+
+ /*
+ * The DTIM count counts down, so when it is N that means N
+ * more beacon intervals happen until the DTIM TBTT. Therefore
+ * add this to the current time. If that ends up being in the
+ * future, the firmware will handle it.
+ *
+ * Also note that the system_timestamp (which we get here as
+ * "sync_device_ts") and TSF timestamp aren't at exactly the
+ * same offset in the frame -- the TSF is at the first symbol
+ * of the TSF, the system timestamp is at signal acquisition
+ * time. This means there's an offset between them of at most
+ * a few hundred microseconds (24 * 8 bits + PLCP time gives
+ * 384us in the longest case), this is currently not relevant
+ * as the firmware wakes up around 2ms before the TBTT.
+ */
+ dtim_offs = vif->bss_conf.sync_dtim_count *
+ vif->bss_conf.beacon_int;
+ /* convert TU to usecs */
+ dtim_offs *= 1024;
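+
+		/*
+		 * Worked example (illustrative values): with
+		 * sync_dtim_count = 2 and beacon_int = 100 TU,
+		 * dtim_offs = 200 TU = 200 * 1024 = 204800 usecs,
+		 * so the DTIM TBTT is predicted ~205 ms after the
+		 * synchronized beacon timestamps.
+		 */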
+
+ ctxt_sta->dtim_tsf =
+ cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
+ ctxt_sta->dtim_time =
+ cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+ ctxt_sta->assoc_beacon_arrive_time =
+ cpu_to_le32(vif->bss_conf.sync_device_ts);
+
+ IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
+ le64_to_cpu(ctxt_sta->dtim_tsf),
+ le32_to_cpu(ctxt_sta->dtim_time),
+ dtim_offs);
+
+ ctxt_sta->is_assoc = cpu_to_le32(1);
+ } else {
+ ctxt_sta->is_assoc = cpu_to_le32(0);
+
+ /* Allow beacons to pass through as long as we are not
+ * associated, or we do not have dtim period information.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ }
+
+ ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_sta->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_sta->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
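+
+	/*
+	 * The *_reciprocal fields carry a fixed-point inverse of the
+	 * interval (iwl_mvm_reciprocal() computes 0xFFFFFFFF / v in
+	 * this driver), presumably so the firmware can trade divisions
+	 * by the beacon/DTIM interval for multiplications.
+	 */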
+
+ ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+
+ if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+ if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ u32 tfd_queue_msk = BIT(mvm->snif_queue);
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
+ MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_CRC32);
+ ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
+
+ /* Allocate sniffer station */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
+ vif->type, IWL_STA_GENERAL_PURPOSE);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST);
+
+	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ cmd.ibss.bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+struct iwl_mvm_go_iterator_data {
+ bool go_active;
+};
+
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->ap_ibss_active)
+ data->go_active = true;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mvm_go_iterator_data data = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+
+ /* Override the filter flags to accept only probe requests */
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+ /*
+	 * This flag should be set to true when the P2P Device is
+	 * discoverable and there is at least one other active P2P GO.
+	 * Setting this flag allows the P2P Device to be discoverable on
+	 * other channels in addition to its listen channel.
+	 * Note that this flag should not be set in other cases, as it
+	 * opens the Rx filters on all MACs and increases the number of
+	 * interrupts.
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_go_iterator, &data);
+
+ cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ __le32 *tim_index, __le32 *tim_size,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon. */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ *tim_index = cpu_to_le32(tim_idx);
+ *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
+ } else {
+ IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+ }
+}
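+
+/*
+ * Illustrative walk of the TIM search above: beacon IEs are TLVs, so
+ * with elements [SSID(0) len 4][SUPP_RATES(1) len 8][TIM(5) len 6]
+ * after the fixed beacon fields, tim_idx first skips 4 + 2 and then
+ * 8 + 2 bytes, stops on the TIM element ID, and tim_size is set from
+ * its length byte, beacon[tim_idx + 1] = 6.
+ */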
+
+static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+{
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+ const u8 *ie;
+
+ if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
+ return 0;
+
+ frame_size -= mgmt->u.beacon.variable - beacon;
+
+ ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+ if (!ie)
+ return 0;
+
+ return ie - beacon;
+}
+
+static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ u8 rate;
+
+ if (info->band == NL80211_BAND_5GHZ || vif->p2p)
+ rate = IWL_FIRST_OFDM_RATE;
+ else
+ rate = IWL_FIRST_CCK_RATE;
+
+ return rate;
+}
+
+static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct iwl_tx_cmd *tx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_tx_info *info;
+ u8 rate;
+ u32 tx_flags;
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ /* Set up TX command fields */
+ tx->len = cpu_to_le16((u16)beacon->len);
+ tx->sta_id = mvmvif->bcast_sta.sta_id;
+ tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
+ tx_flags |=
+ iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+ tx->tx_flags = cpu_to_le32(tx_flags);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+ mvm->mgmt_last_antenna_idx);
+ }
+
+ tx->rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+
+ rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+
+ tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+ if (rate == IWL_FIRST_CCK_RATE)
+ tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+}
+
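+/*
+ * Send the beacon template as a two-fragment host command: fragment 0
+ * carries the (version-specific) command, fragment 1 the beacon frame
+ * itself, duplicated so that it is safe for DMA.
+ */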
+static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+ struct sk_buff *beacon,
+ void *data, int len)
+{
+ struct iwl_host_cmd cmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ .flags = CMD_ASYNC,
+ };
+
+ cmd.len[0] = len;
+ cmd.data[0] = data;
+ cmd.dataflags[0] = 0;
+ cmd.len[1] = beacon->len;
+ cmd.data[1] = beacon->data;
+ cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
+
+ iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
+
+ iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ beacon_cmd.csa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ beacon_cmd.ecsa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+ u16 flags;
+
+ flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
+
+ if (rate == IWL_FIRST_CCK_RATE)
+ flags |= IWL_MAC_BEACON_CCK;
+
+ beacon_cmd.flags = cpu_to_le16(flags);
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ beacon_cmd.csa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ beacon_cmd.ecsa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
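+/*
+ * Pick the beacon template command version the firmware supports:
+ * v6 without CSA/TBTT offload, v9 with the new template API,
+ * and v7 otherwise.
+ */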
+static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ if (WARN_ON(!beacon))
+ return -EINVAL;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
+ return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
+ return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon);
+
+ return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *beacon;
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
+
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ if (!beacon)
+ return -ENOMEM;
+
+ ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+ dev_kfree_skb(beacon);
+ return ret;
+}
+
+struct iwl_mvm_mac_ap_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ u32 beacon_device_ts;
+ u16 beacon_int;
+};
+
+/* Find the beacon_device_ts and beacon_int for a managed interface */
+static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_ap_iterator_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ /* A station client has higher priority than a P2P client */
+ if (vif->p2p && data->beacon_device_ts)
+ return;
+
+ data->beacon_device_ts = vif->bss_conf.sync_device_ts;
+ data->beacon_int = vif->bss_conf.beacon_int;
+}
+
+/*
+ * Fill the specific data for a mac context of type AP or P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ struct iwl_mac_data_ap *ctxt_ap,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_ap_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .beacon_device_ts = 0
+ };
+
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+ /*
+ * in AP mode, pass probe requests and beacons from other APs
+ * (needed for HT protection); when no station is associated,
+ * don't ask the FW to pass beacons, to prevent unnecessary
+ * wake-ups.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+ } else {
+ IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+ }
+
+ ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_ap->bi_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+ ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+ ctxt_ap->dtim_reciprocal =
+ cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period));
+
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+
+ /*
+ * Only set the beacon time when the MAC is being added; when we
+ * just modify the MAC we should keep the time -- the firmware
+ * can otherwise have a "jumping" TBTT.
+ */
+ if (add) {
+ /*
+ * If there is a station/P2P client interface which is
+ * associated, set the AP's TBTT far enough from the station's
+ * TBTT. Otherwise, set it to the current system time.
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_ap_iterator, &data);
+
+ if (data.beacon_device_ts) {
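+ /*
+ * Offset the AP's TBTT by a pseudo-random 36-63% of the
+ * client's beacon interval, to keep the two TBTTs apart.
+ */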
+ u32 rand = (prandom_u32() % (64 - 36)) + 36;
+ mvmvif->ap_beacon_time = data.beacon_device_ts +
+ ieee80211_tu_to_usec(data.beacon_int * rand /
+ 100);
+ } else {
+ mvmvif->ap_beacon_time =
+ iwl_read_prph(mvm->trans,
+ DEVICE_SYSTEM_TIME_REG);
+ }
+ }
+
+ ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+ ctxt_ap->beacon_tsf = 0; /* unused */
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ /* Fill the data specific for ap mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
+ action == FW_CTXT_ACTION_ADD);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ /* Fill the data specific for GO mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
+ action == FW_CTXT_ACTION_ADD);
+
+ cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+ cmd.go.opp_ps_enabled =
+ cpu_to_le32(!!(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT));
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off,
+ const u8 *bssid_override)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+ force_assoc_off,
+ bssid_override);
+ case NL80211_IFTYPE_AP:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+ case NL80211_IFTYPE_MONITOR:
+ return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+ true, NULL);
+ if (ret)
+ return ret;
+
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
+ mvmvif->uploaded = true;
+ return 0;
+}
+
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off, const u8 *bssid_override)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+ force_assoc_off, bssid_override);
+}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd;
+ int ret;
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+ return ret;
+ }
+
+ mvmvif->uploaded = false;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
+ iwl_mvm_dealloc_snif_sta(mvm);
+ }
+
+ return 0;
+}
+
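+/*
+ * Advance the CSA countdown on each beacon TX notification and update
+ * the beacon template; once the countdown completes (and no NoA time
+ * event is scheduled), finalize the channel switch.
+ */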
+static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
+ struct ieee80211_vif *csa_vif, u32 gp2,
+ bool tx_success)
+{
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(csa_vif);
+
+ /* Don't start the countdown from a failed beacon */
+ if (!tx_success && !mvmvif->csa_countdown)
+ return;
+
+ mvmvif->csa_countdown = true;
+
+ if (!ieee80211_csa_is_complete(csa_vif)) {
+ int c = ieee80211_csa_update_counter(csa_vif);
+
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
+ if (csa_vif->p2p &&
+ !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
+ tx_success) {
+ u32 rel_time = (c + 1) *
+ csa_vif->bss_conf.beacon_int -
+ IWL_MVM_CHANNEL_SWITCH_TIME_GO;
+ u32 apply_time = gp2 + rel_time * 1024;
+
+ iwl_mvm_schedule_csa_period(mvm, csa_vif,
+ IWL_MVM_CHANNEL_SWITCH_TIME_GO -
+ IWL_MVM_CHANNEL_SWITCH_MARGIN,
+ apply_time);
+ }
+ } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
+ /* we don't have CSA NoA scheduled yet, switch now */
+ ieee80211_csa_finish(csa_vif);
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ }
+}
+
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+ struct iwl_mvm_tx_resp *beacon_notify_hdr;
+ struct ieee80211_vif *csa_vif;
+ struct ieee80211_vif *tx_blocked_vif;
+ struct agg_tx_status *agg_status;
+ u16 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ beacon_notify_hdr = &beacon->beacon_notify_hdr;
+ mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+ mvm->ibss_manager = beacon->ibss_mgr_status != 0;
+
+ agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+ status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
+ IWL_DEBUG_RX(mvm,
+ "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
+ status, beacon_notify_hdr->failure_frame,
+ le64_to_cpu(beacon->tsf),
+ mvm->ap_last_beacon_gp2,
+ le32_to_cpu(beacon_notify_hdr->initial_rate));
+
+ csa_vif = rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (unlikely(csa_vif && csa_vif->csa_active))
+ iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
+ (status == TX_STATUS_SUCCESS));
+
+ tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (unlikely(tx_blocked_vif)) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ /*
+ * The channel switch has started and we have blocked the
+ * stations. If this is the first beacon (the timeout wasn't
+ * set yet), set the unblock timeout; otherwise count down.
+ */
+ if (!mvm->csa_tx_block_bcn_timeout)
+ mvm->csa_tx_block_bcn_timeout =
+ IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
+ else
+ mvm->csa_tx_block_bcn_timeout--;
+
+ /* Check if the timeout is expired, and unblock tx */
+ if (mvm->csa_tx_block_bcn_timeout == 0) {
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ }
+ }
+}
+
+static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_missed_beacons_notif *missed_beacons = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+ u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+ u32 rx_missed_bcon, rx_missed_bcon_since_rx;
+
+ if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
+ return;
+
+ rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+ rx_missed_bcon_since_rx =
+ le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
+ /*
+ * TODO: the threshold should be adjusted based on latency conditions,
+ * and/or in case of a CS flow on one of the other AP vifs.
+ */
+ if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
+ IWL_MVM_MISSED_BEACONS_THRESHOLD)
+ ieee80211_beacon_loss(vif);
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
+ FW_DBG_TRIGGER_MISSED_BEACONS))
+ return;
+
+ trigger = iwl_fw_dbg_get_trigger(mvm->fw,
+ FW_DBG_TRIGGER_MISSED_BEACONS);
+ bcon_trig = (void *)trigger->data;
+ stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+ stop_trig_missed_bcon_since_rx =
+ le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+ /* TODO: implement start trigger */
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ trigger))
+ return;
+
+ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+ rx_missed_bcon >= stop_trig_missed_bcon)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
+}
+
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+ le32_to_cpu(mb->mac_id),
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+ le32_to_cpu(mb->num_recvd_beacons),
+ le32_to_cpu(mb->num_expected_beacons));
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_beacon_loss_iterator,
+ mb);
+}
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ u32 size = le32_to_cpu(sb->byte_count);
+
+ if (size == 0)
+ return;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ /* update rx_status according to the notification's metadata */
+ memset(&rx_status, 0, sizeof(rx_status));
+ rx_status.mactime = le64_to_cpu(sb->tsf);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+ rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+ rx_status.band =
+ (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+ rx_status.band);
+
+ /* copy the data */
+ skb_put_data(skb, sb->data, size);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ /* pass it as regular rx to mac80211 */
+ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
+}
+
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif;
+ int len = iwl_rx_packet_payload_len(pkt);
+ u32 id_n_color;
+
+ if (WARN_ON_ONCE(len < sizeof(*notif)))
+ return;
+
+ rcu_read_lock();
+
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (WARN_ON(!csa_vif || !csa_vif->csa_active))
+ goto out_unlock;
+
+ id_n_color = le32_to_cpu(notif->id_and_color);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+ if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
+ "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+ FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color))
+ goto out_unlock;
+
+ IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+
+ schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
+ msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+ csa_vif->bss_conf.beacon_int));
+
+ ieee80211_csa_finish(csa_vif);
+
+ rcu_read_unlock();
+
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+ return;
+
+out_unlock:
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
new file mode 100644
index 000000000..3f37fb64e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,4730 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/time.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+#include <net/tcp.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-phy-db.h"
+#include "testmode.h"
+#include "fw/error-dump.h"
+#include "iwl-prph.h"
+#include "iwl-nvm-parse.h"
+
+static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
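+/* Allow up to three concurrent interfaces on at most two different channels */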
+static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits),
+ },
+};
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values will only be used internally by the driver,
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ * be the vif's IP address. In case there is not exactly one
+ * IP address (0, or more than 1), this attribute will
+ * be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ * the LSB bytes of the vif's MAC address.
+ */
+enum {
+ BC_FILTER_MAGIC_NONE = 0,
+ BC_FILTER_MAGIC_IP,
+ BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+ {
+ /* arp */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+ .attrs = {
+ {
+ /* frame type - arp, hw type - ethernet */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header),
+ .val = cpu_to_be32(0x08060001),
+ .mask = cpu_to_be32(0xffffffff),
+ },
+ {
+ /* arp dest ip */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header) + 2 +
+ sizeof(struct arphdr) +
+ ETH_ALEN + sizeof(__be32) +
+ ETH_ALEN,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+ },
+ },
+ },
+ {
+ /* dhcp offer bcast */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ .attrs = {
+ {
+ /* UDP dest port - 68 (BOOTP client) */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = offsetof(struct udphdr, dest),
+ .val = cpu_to_be32(0x00440000),
+ .mask = cpu_to_be32(0xffff0000),
+ },
+ {
+ /* dhcp - lsb bytes of client hw address */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = 38,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+ },
+ },
+ },
+ /* last filter must be empty */
+ {},
+};
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+ spin_lock_bh(&mvm->refs_lock);
+ mvm->refs[ref_type]++;
+ spin_unlock_bh(&mvm->refs_lock);
+ iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+ spin_lock_bh(&mvm->refs_lock);
+ if (WARN_ON(!mvm->refs[ref_type])) {
+ spin_unlock_bh(&mvm->refs_lock);
+ return;
+ }
+ mvm->refs[ref_type]--;
+ spin_unlock_bh(&mvm->refs_lock);
+ iwl_trans_unref(mvm->trans);
+}
+
+static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
+ enum iwl_mvm_ref_type except_ref)
+{
+ int i, j;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ spin_lock_bh(&mvm->refs_lock);
+ for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+ if (except_ref == i || !mvm->refs[i])
+ continue;
+
+ IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
+ i, mvm->refs[i]);
+ for (j = 0; j < mvm->refs[i]; j++)
+ iwl_trans_unref(mvm->trans);
+ mvm->refs[i] = 0;
+ }
+ spin_unlock_bh(&mvm->refs_lock);
+}
+
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
+{
+ int i;
+ bool taken = false;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return true;
+
+ spin_lock_bh(&mvm->refs_lock);
+ for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+ if (mvm->refs[i]) {
+ taken = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&mvm->refs_lock);
+
+ return taken;
+}
+
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ iwl_mvm_ref(mvm, ref_type);
+
+ if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
+ HZ)) {
+ WARN_ON_ONCE(1);
+ iwl_mvm_unref(mvm, ref_type);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
+{
+ int i;
+
+ memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ mvm->phy_ctxts[i].id = i;
+ mvm->phy_ctxts[i].ref = 0;
+ }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcc_update_resp *resp;
+
+ IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+ if (IS_ERR_OR_NULL(resp)) {
+ IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+ PTR_ERR_OR_ZERO(resp));
+ goto out;
+ }
+
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
+
+ regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+ __le32_to_cpu(resp->n_channels),
+ resp->channels,
+ __le16_to_cpu(resp->mcc),
+ __le16_to_cpu(resp->geo_info),
+ __le16_to_cpu(resp->cap));
+ /* Store the returned source id */
+ src_id = resp->source_id;
+ kfree(resp);
+ if (IS_ERR_OR_NULL(regd)) {
+ IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
+ PTR_ERR_OR_ZERO(regd));
+ goto out;
+ }
+
+ IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+ regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+ mvm->lar_regdom_set = true;
+ mvm->mcc_src = src_id;
+
+out:
+ return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+ bool changed;
+ struct ieee80211_regdomain *regd;
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+ if (!IS_ERR_OR_NULL(regd)) {
+ /* only update the regulatory core if changed */
+ if (changed)
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+ kfree(regd);
+ }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed)
+{
+ return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+ iwl_mvm_is_wifi_mcc_supported(mvm) ?
+ MCC_SOURCE_GET_CURRENT :
+ MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+{
+ enum iwl_mcc_source used_src;
+ struct ieee80211_regdomain *regd;
+ int ret;
+ bool changed;
+ const struct ieee80211_regdomain *r =
+ rtnl_dereference(mvm->hw->wiphy->regd);
+
+ if (!r)
+ return -ENOENT;
+
+ /* save the last source in case we overwrite it below */
+ used_src = mvm->mcc_src;
+ if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+ /* Notify the firmware we support wifi location updates */
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (!IS_ERR_OR_NULL(regd))
+ kfree(regd);
+ }
+
+ /* Now set our last stored MCC and source */
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
+ &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ /* update cfg80211 if the regdomain was changed */
+ if (changed)
+ ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ else
+ ret = 0;
+
+ kfree(regd);
+ return ret;
+}
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ int num_mac, ret, i;
+ static const u32 mvm_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ /* Tell mac80211 our characteristics */
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm))
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+ /*
+ * we absolutely need this for the new TX API since that comes
+ * with many more queues than the current code can deal with
+ * for station powersave
+ */
+ return -EINVAL;
+ }
+
+ if (mvm->trans->num_rx_queues > 1)
+ ieee80211_hw_set(hw, USES_RSS);
+
+ if (mvm->trans->max_skb_frags)
+ hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+
+ hw->queues = IEEE80211_MAX_QUEUES;
+ hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+ /* 22 is the accuracy for CCK frames; it's better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+
+ if (!iwl_mvm_has_tlc_offload(mvm))
+ hw->rate_control_algorithm = RS_NAME;
+
+ hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+
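+ /*
+ * Leave room for up to six extra ciphers: GCMP, GCMP-256, CMAC,
+ * BIP-GMAC-128, BIP-GMAC-256 and one FW-provided cipher scheme.
+ */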
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
+ memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+ hw->wiphy->cipher_suites = mvm->ciphers;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP_256;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ /* Enable 11w if software crypto is not enabled (as the
+ * firmware will interpret some mgmt packets, so enabling it
+ * with software crypto isn't safe).
+ */
+ if (!iwlwifi_mod_params.swcrypto) {
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_AES_CMAC;
+ hw->wiphy->n_cipher_suites++;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_256;
+ hw->wiphy->n_cipher_suites++;
+ }
+ }
+
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs[0].cipher) {
+ const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
+ struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
+
+ mvm->hw->n_cipher_schemes = 1;
+
+ cs->cipher = le32_to_cpu(fwcs->cipher);
+ cs->iftype = BIT(NL80211_IFTYPE_STATION);
+ cs->hdr_len = fwcs->hdr_len;
+ cs->pn_len = fwcs->pn_len;
+ cs->pn_off = fwcs->pn_off;
+ cs->key_idx_off = fwcs->key_idx_off;
+ cs->key_idx_mask = fwcs->key_idx_mask;
+ cs->key_idx_shift = fwcs->key_idx_shift;
+ cs->mic_len = fwcs->mic_len;
+
+ mvm->hw->cipher_schemes = mvm->cs;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ hw->wiphy->features |=
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+
+ hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+ hw->vif_data_size = sizeof(struct iwl_mvm_vif);
+ hw->chanctx_data_size = sizeof(u16);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+ if (iwl_mvm_is_lar_supported(mvm))
+ hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ else
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
+
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mvm_iface_combinations);
+
+ hw->wiphy->max_remain_on_channel_duration = 10000;
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+ /* we can compensate for an offset of up to 3 channels = 15 MHz */
+ hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
+
+ /* Extract MAC address */
+ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
+ hw->wiphy->addresses = mvm->addresses;
+ hw->wiphy->n_addresses = 1;
+
+ /* Extract additional MAC addresses if available */
+ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
+ min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
+
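+ /* derive the extra addresses by incrementing the last octet */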
+ for (i = 1; i < num_mac; i++) {
+ memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
+ ETH_ALEN);
+ mvm->addresses[i].addr[5]++;
+ hw->wiphy->n_addresses++;
+ }
+
+ iwl_mvm_reset_phy_ctxts(mvm);
+
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+
+ BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+ BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ else
+ mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
+ if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+
+ hw->wiphy->hw_version = mvm->trans->hw_id;
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_sched_scan_reqs = 1;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and a zero-length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+ hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+ hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
+
+ /*
+ * the firmware uses a u8 for the number of iterations, but 0xff is
+ * reserved for an infinite loop, so the maximum is actually 254.
+ */
+ hw->wiphy->max_sched_scan_plan_iterations = 254;
+
+ hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_DYNAMIC_SMPS |
+ NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_QUIET;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ hw->wiphy->features |=
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ }
+
+ if (iwl_mvm_is_oce_supported(mvm)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+ }
+
+ mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+#ifdef CONFIG_PM_SLEEP
+ if (iwl_mvm_is_d0i3_supported(mvm) &&
+ device_can_wakeup(mvm->trans->dev)) {
+ mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ hw->wiphy->wowlan = &mvm->wowlan;
+ }
+
+ if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
+ mvm->trans->ops->d3_suspend &&
+ mvm->trans->ops->d3_resume &&
+ device_can_wakeup(mvm->trans->dev)) {
+ mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT;
+ if (!iwlwifi_mod_params.swcrypto)
+ mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
+ hw->wiphy->wowlan = &mvm->wowlan;
+ }
+#endif
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* assign default bcast filtering configuration */
+ mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
+ ret = iwl_mvm_leds_init(mvm);
+ if (ret)
+ return ret;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
+ IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
+ hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ }
+
+ hw->netdev_features |= mvm->cfg->features;
+ if (!iwl_mvm_is_csum_supported(mvm)) {
+ hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+ NETIF_F_RXCSUM);
+ /* We may support SW TX CSUM */
+ if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+ }
+
+ ret = ieee80211_register_hw(mvm->hw);
+ if (ret)
+ iwl_mvm_leds_exit(mvm);
+ mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+
+ if (mvm->cfg->vht_mu_mimo_supported)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
+ return ret;
+}
+
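+/*
+ * While in D0i3, queue frames destined for the AP station and trigger
+ * a wakeup instead of transmitting; the queue is drained on D0i3 exit.
+ */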
+static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm_sta *mvmsta;
+ bool defer = false;
+
+ /*
+ * Check the IN_D0I3 flag both before and after taking the
+ * spinlock, to avoid taking the spinlock when it isn't needed.
+ */
+ if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
+ return false;
+
+ spin_lock(&mvm->d0i3_tx_lock);
+ /*
+ * testing the flag again ensures the skb dequeue
+ * loop (on d0i3 exit) hasn't run yet.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
+ mvmsta->sta_id != mvm->d0i3_ap_sta_id)
+ goto out;
+
+ __skb_queue_tail(&mvm->d0i3_tx, skb);
+ ieee80211_stop_queues(mvm->hw);
+
+ /* trigger wakeup */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
+
+ defer = true;
+out:
+ spin_unlock(&mvm->d0i3_tx_lock);
+ return defer;
+}
+
+static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
+ goto drop;
+ }
+
+ if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+ !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+ goto drop;
+
+ /* treat non-bufferable MMPDUs on AP interfaces as broadcast */
+ if ((info->control.vif->type == NL80211_IFTYPE_AP ||
+ info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
+ ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
+ sta = NULL;
+
+ /* If there is no sta and it's not offchannel, send through the AP */
+ if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+ info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ /* mac80211 holds rcu read lock */
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto drop;
+ }
+ }
+
+ if (sta) {
+ if (iwl_mvm_defer_tx(mvm, sta, skb))
+ return;
+ if (iwl_mvm_tx_skb(mvm, skb, sta))
+ goto drop;
+ return;
+ }
+
+ if (iwl_mvm_tx_skb_non_sta(mvm, skb))
+ goto drop;
+ return;
+ drop:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
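+/*
+ * Collect the BA debug trigger data only if the TID's bit is set in
+ * the trigger's TID bitmap.
+ */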
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
+ do { \
+ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
+ break; \
+ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
+ } while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+ enum ieee80211_ampdu_mlme_action action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_OPERATIONAL: {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+ "TX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, tid_data->ssn);
+ break;
+ }
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+ "TX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+ "RX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, rx_ba_ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+ "RX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ default:
+ break;
+ }
+}
+
+static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+ bool tx_agg_ref = false;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u16 buf_size = params->buf_size;
+ bool amsdu = params->amsdu;
+ u16 timeout = params->timeout;
+
+ IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
+ sta->addr, tid, action);
+
+ if (!(mvm->nvm_data->sku_cap_11n_enable))
+ return -EACCES;
+
+ /* return from D0i3 before starting a new Tx aggregation */
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /*
+ * for TX start, wait synchronously until the D0i3 exit to
+ * get the correct sequence number for the TID.
+ * Additionally, some other AMPDU actions use direct
+ * target access, which is not handled automatically
+ * by the trans layer (unlike commands), so wait for
+ * the D0i3 exit in these cases as well.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
+ if (ret)
+ return ret;
+
+ tx_agg_ref = true;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
+ iwl_mvm_sta_from_mac80211(sta)->sta_id) {
+ struct iwl_mvm_vif *mvmvif;
+ u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
+
+ mdata->opened_rx_ba_sessions = true;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
+ }
+ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+ buf_size, amsdu);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ u16 rx_ba_ssn = 0;
+
+ if (action == IEEE80211_AMPDU_RX_START)
+ rx_ba_ssn = *ssn;
+
+ iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+ rx_ba_ssn, action);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * If the tid is marked as started, we won't use it for offloaded
+ * traffic on the next D0i3 entry. It's safe to unref.
+ */
+ if (tx_agg_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+
+ return ret;
+}
+
+static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->uploaded = false;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ mvmvif->phy_ctxt = NULL;
+ memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+}
+
+static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+{
+ /* Clear the D3 reconfig flag; we only need it to avoid dumping a
+ * firmware coredump on reconfiguration, and we shouldn't do that
+ * on the D3->D0 transition.
+ */
+ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
+ mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
+ iwl_fw_error_dump(&mvm->fwrt);
+ }
+
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete
+ */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ iwl_mvm_stop_device(mvm);
+
+ mvm->scan_status = 0;
+ mvm->ps_disabled = false;
+ mvm->calibrating = false;
+
+ /* just in case one was running */
+ iwl_mvm_cleanup_roc_te(mvm);
+ ieee80211_remain_on_channel_expired(mvm->hw);
+
+ /*
+ * cleanup all interfaces, even inactive ones, as some might have
+ * gone down during the HW restart
+ */
+ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+
+ mvm->p2p_device_vif = NULL;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+
+ iwl_mvm_reset_phy_ctxts(mvm);
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
+ memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+ ieee80211_wake_queues(mvm->hw);
+
+ /* clear any stale d0i3 state */
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+
+ mvm->vif_count = 0;
+ mvm->rx_ba_sessions = 0;
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ mvm->monitor_on = false;
+
+ /* keep statistics ticking */
+ iwl_mvm_accu_radio_stats(mvm);
+}
+
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+ /*
+ * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART,
+ * so that later code will see that we're doing it.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+ /* Clean up some internal and mac80211 state on restart */
+ iwl_mvm_restart_cleanup(mvm);
+ } else {
+ /* Hold the reference to prevent runtime suspend while
+ * the start procedure runs. It's a bit confusing
+ * that the UCODE_DOWN reference is taken, but it just
+ * means "UCODE is not UP yet". (TODO: rename this
+ * reference.)
+ */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ }
+ ret = iwl_mvm_up(mvm);
+
+ if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ /* Something went wrong - we need to finish some cleanup
+ * that normally iwl_mvm_mac_restart_complete() below
+ * would do.
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ iwl_mvm_d0i3_enable_tx(mvm, NULL);
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ /* Some hw restart cleanups must not hold the mutex */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ /*
+ * Make sure we are out of d0i3. This is needed
+ * to make sure the reference accounting is correct
+ * (and there is no stale d0i3_exit_work).
+ */
+ wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3,
+ &mvm->status),
+ HZ);
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = __iwl_mvm_mac_start(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ iwl_mvm_d0i3_enable_tx(mvm, NULL);
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+ ret);
+
+ /* allow transport/FW low power modes */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ /*
+ * If we have TDLS peers, remove them. We don't know the last seqno/PN
+ * of packets the FW sent out, so we must reconnect.
+ */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
+{
+ if (iwl_mvm_is_d0i3_supported(mvm) &&
+ iwl_mvm_enter_d0i3_on_suspend(mvm))
+ WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3,
+ &mvm->status),
+ HZ),
+ "D0i3 exit on resume timed out\n");
+}
+
+static void
+iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ iwl_mvm_restart_complete(mvm);
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ iwl_mvm_resume_complete(mvm);
+ break;
+ }
+}
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* the firmware counters are reset now, and we shouldn't keep
+ * partial statistics, so also clear the accumulated counters.
+ */
+ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
+ /* async_handlers_wk is now blocked */
+
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ iwl_mvm_stop_device(mvm);
+
+ iwl_mvm_async_handlers_purge(mvm);
+ /* async_handlers_list is empty and will stay empty: HW is stopped */
+
+ /* the fw is stopped, the aux sta is dead: clean up driver state */
+ iwl_mvm_del_aux_sta(mvm);
+
+ /*
+ * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when stopping
+ * the hw (as restart_complete() won't be called in this case) and
+ * mac80211 won't execute the restart.
+ * But make sure to clean up interfaces that went down before or while
+ * the HW restart was requested.
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_cleanup_iterator, mvm);
+
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int i;
+
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
+ }
+ }
+}
+
+static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ flush_work(&mvm->d0i3_exit_work);
+ flush_work(&mvm->async_handlers_wk);
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * Lock and clear the firmware running bit here already, so that
+ * new commands coming in elsewhere, e.g. from debugfs, will not
+ * be able to proceed. This is important here because one of those
+ * debugfs files causes the firmware dump to be triggered, and if we
+ * don't stop debugfs accesses before canceling that it could be
+ * retriggered after we flush it but before we've cleared the bit.
+ */
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ iwl_fw_cancel_dump(&mvm->fwrt);
+ cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+ cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+ iwl_fw_free_dump_desc(&mvm->fwrt);
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_mac_stop(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * The worker might have been waiting for the mutex, let it run and
+ * discover that its list is now empty.
+ */
+ cancel_work_sync(&mvm->async_handlers_wk);
+}
+
+static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+{
+ u16 i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < NUM_PHY_CTX; i++)
+ if (!mvm->phy_ctxts[i].ref)
+ return &mvm->phy_ctxts[i];
+
+ IWL_ERR(mvm, "No available PHY context\n");
+ return NULL;
+}
+
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s16 tx_power)
+{
+ struct iwl_dev_tx_power_cmd cmd = {
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v3.mac_context_id =
+ cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
+ .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+ };
+ int len = sizeof(cmd);
+
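+	/*
+	 * The firmware seems to take pwr_restriction in 1/8 dBm units
+	 * (hence the 8 * tx_power conversion above); mac80211 hands us
+	 * whole dBm.
+	 */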
+ if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
+ cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v3);
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
+
+static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mvmvif->mvm = mvm;
+
+ /*
+ * make sure D0i3 exit is completed, otherwise a target access
+ * during tx queue configuration could be done when still in
+ * D0i3 state.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
+ if (ret)
+ return ret;
+
+ /*
+ * Not much to do here. The stack will not allow interface
+ * types or combinations that we didn't advertise, so we
+ * don't really have to check the types.
+ */
+
+ mutex_lock(&mvm->mutex);
+
+ /* make sure that beacon statistics don't go backwards with FW reset */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Counting number of interfaces is needed for legacy PM */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count++;
+
+ /*
+ * The AP binding flow can be done only after the beacon
+ * template is configured (which happens only in the mac80211
+ * start_ap() flow), and adding the broadcast station can happen
+ * only after the binding.
+ * In addition, since modifying the MAC before adding a bcast
+ * station is not allowed by the FW, delay the adding of MAC context to
+ * the point where we can also add the bcast station.
+ * In short: there's not much we can do at this point, other than
+ * allocating resources :)
+ */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+ goto out_release;
+ }
+
+ /*
+	 * The only queue for this station is the mcast queue,
+	 * which shouldn't be in the TFD mask anyway.
+ */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
+ 0, vif->type,
+ IWL_STA_MULTICAST);
+ if (ret)
+ goto out_release;
+
+ iwl_mvm_vif_dbgfs_register(mvm, vif);
+ goto out_unlock;
+ }
+
+ mvmvif->features |= hw->netdev_features;
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_release;
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out_remove_mac;
+
+ /* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_remove_mac;
+
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+ mvm->bf_allowed_vif = mvmvif;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+ /*
+ * P2P_DEVICE interface does not have a channel context assigned to it,
+ * so a dedicated PHY context is allocated to it and the corresponding
+ * MAC context is bound to it at this stage.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!mvmvif->phy_ctxt) {
+ ret = -ENOSPC;
+ goto out_free_bf;
+ }
+
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_unref_phy;
+
+ ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped */
+ mvm->p2p_device_vif = vif;
+ }
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = true;
+
+ iwl_mvm_vif_dbgfs_register(mvm, vif);
+ goto out_unlock;
+
+ out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ out_unref_phy:
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+ out_remove_mac:
+ mvmvif->phy_ctxt = NULL;
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_release:
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count--;
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
+
+ return ret;
+}
+
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ /*
+		 * Flush the ROC worker, which will flush the OFFCHANNEL queue.
+		 * We assume here that all the packets sent to the OFFCHANNEL
+		 * queue are sent during a ROC session.
+ */
+ flush_work(&mvm->roc_done_wk);
+ }
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
+ if (!(vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC))
+ iwl_mvm_tcm_rm_vif(mvm, vif);
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+
+ iwl_mvm_vif_dbgfs_clean(mvm, vif);
+
+ /*
+	 * For AP/GO interfaces, the teardown of the resources allocated to
+	 * the interface is handled as part of the stop_ap flow.
+ */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (vif == mvm->noa_vif) {
+ mvm->noa_vif = NULL;
+ mvm->noa_duration = 0;
+ }
+#endif
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
+ iwl_mvm_dealloc_bcast_sta(mvm, vif);
+ goto out_release;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvm->p2p_device_vif = NULL;
+ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ mvmvif->phy_ctxt = NULL;
+ }
+
+ if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count--;
+
+ iwl_mvm_power_update_mac(mvm);
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = false;
+
+out_release:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+struct iwl_mvm_mc_iter_data {
+ struct iwl_mvm *mvm;
+ int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mc_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret, len;
+
+ /* if we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ cmd->port_id = data->port_id++;
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret)
+ IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_mc_iter_data iter_data = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mc_iface_iterator, &iter_data);
+
+ /*
+	 * Send a (synchronous) echo command so that we wait for the
+ * multiple asynchronous MCAST_FILTER_CMD commands sent by
+ * the interface iterator. Otherwise, we might get here over
+ * and over again (by userspace just sending a lot of these)
+ * and the CPU can send them faster than the firmware can
+ * process them.
+ * Note that the CPU is still faster - but with this we'll
+ * actually send fewer commands overall because the CPU will
+ * not schedule the work in mac80211 as frequently if it's
+ * still running when rescheduled (possibly multiple times).
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
+}
+
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count;
+ bool pass_all;
+ int len;
+
+ addr_count = netdev_hw_addr_list_count(mc_list);
+ pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
+ IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
+ if (pass_all)
+ addr_count = 0;
+
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+ return (u64)(unsigned long)cmd;
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr, ETH_ALEN);
+ cmd->count++;
+ }
+
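+	/*
+	 * mac80211 treats the return value as an opaque u64 cookie: it is
+	 * handed back to us as the "multicast" argument of
+	 * iwl_mvm_configure_filter() below, which stores and eventually
+	 * frees the command.
+	 */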
+ return (u64)(unsigned long)cmd;
+}
+
+static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+
+ mutex_lock(&mvm->mutex);
+
+ /* replace previous configuration */
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = cmd;
+
+ if (!cmd)
+ goto out;
+
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
+ iwl_mvm_recalc_multicast(mvm);
+out:
+ mutex_unlock(&mvm->mutex);
+ *total_flags = 0;
+}
+
+static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int filter_flags,
+ unsigned int changed_flags)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We only support filtering of probe requests */
+ if (!(changed_flags & FIF_PROBE_REQ))
+ return;
+
+ /* Supported only for p2p client interfaces */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+ !vif->p2p)
+ return;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ mutex_unlock(&mvm->mutex);
+}
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bcast_filter_cmd *cmd;
+ u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+ const struct iwl_fw_bcast_filter *in_filter,
+ struct iwl_fw_bcast_filter *out_filter)
+{
+ struct iwl_fw_bcast_filter_attr *attr;
+ int i;
+
+ memcpy(out_filter, in_filter, sizeof(*out_filter));
+
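+	/*
+	 * Filters may contain "magic" attributes that are resolved at
+	 * runtime: BC_FILTER_MAGIC_IP becomes the vif's IPv4 address (only
+	 * when exactly one ARP address is configured) and
+	 * BC_FILTER_MAGIC_MAC the last four bytes of the vif's MAC address.
+	 */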
+ for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+ attr = &out_filter->attrs[i];
+
+ if (!attr->mask)
+ break;
+
+ switch (attr->reserved1) {
+ case cpu_to_le16(BC_FILTER_MAGIC_IP):
+ if (vif->bss_conf.arp_addr_cnt != 1) {
+ attr->mask = 0;
+ continue;
+ }
+
+ attr->val = vif->bss_conf.arp_addr_list[0];
+ break;
+ case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+ attr->val = *(__be32 *)&vif->addr[2];
+ break;
+ default:
+ break;
+ }
+ attr->reserved1 = 0;
+ out_filter->num_attrs++;
+ }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_bcast_filter_cmd *cmd = data->cmd;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_fw_bcast_mac *bcast_mac;
+ int i;
+
+ if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+ return;
+
+ bcast_mac = &cmd->macs[mvmvif->id];
+
+ /*
+	 * Enable filtering only for associated stations, but not for P2P
+	 * clients
+ */
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+ !vif->bss_conf.assoc)
+ return;
+
+ bcast_mac->default_discard = 1;
+
+ /* copy all configured filters */
+ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+ /*
+		 * Make sure we don't exceed our filter limit.
+		 * If there is still a valid filter to be configured,
+ * be on the safe side and just allow bcast for this mac.
+ */
+ if (WARN_ON_ONCE(data->current_filter >=
+ ARRAY_SIZE(cmd->filters))) {
+ bcast_mac->default_discard = 0;
+ bcast_mac->attached_filters = 0;
+ break;
+ }
+
+ iwl_mvm_set_bcast_filter(vif,
+ &mvm->bcast_filters[i],
+ &cmd->filters[data->current_filter]);
+
+ /* skip current filter if it contains no attributes */
+ if (!cmd->filters[data->current_filter].num_attrs)
+ continue;
+
+ /* attach the filter to current mac */
+ bcast_mac->attached_filters |=
+ cpu_to_le16(BIT(data->current_filter));
+
+ data->current_filter++;
+ }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd)
+{
+ struct iwl_bcast_iter_data iter_data = {
+ .mvm = mvm,
+ .cmd = cmd,
+ };
+
+ if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
+ return false;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+ cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* use debugfs filters/macs if override is configured */
+ if (mvm->dbgfs_bcast_filtering.override) {
+ memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+ sizeof(cmd->filters));
+ memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+ sizeof(cmd->macs));
+ return true;
+ }
+#endif
+
+ /* if no filters are configured, do nothing */
+ if (!mvm->bcast_filters)
+ return false;
+
+ /* configure and attach these filters for each associated sta vif */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bcast_filter_iterator, &iter_data);
+
+ return true;
+}
+
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
+{
+ struct iwl_bcast_filter_cmd cmd;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+ return 0;
+
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+ sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+#endif
+
+static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mu_group_mgmt_cmd cmd = {};
+
+ memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+ WLAN_USER_POSITION_LEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ UPDATE_MU_GROUPS_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->mu_mimo_owner) {
+ struct iwl_mu_group_mgmt_notif *notif = _data;
+
+ /*
+ * MU-MIMO Group Id action frame is little endian. We treat
+ * the data received from firmware as if it came from the
+ * action frame, so no conversion is needed.
+ */
+ ieee80211_update_mu_groups(vif,
+ (u8 *)&notif->membership_status,
+ (u8 *)&notif->user_position);
+ }
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
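+	/*
+	 * For example, with PPET size 3 and ppe_pos_bit = 7 we get
+	 * byte_num = 0 and bit_num = 7, so residue_bits = 1 and the value
+	 * combines bit 7 of ppe[0] with bits 0-1 of ppe[1]:
+	 * res = ((ppe[1] & 0x3) << 1) | ((ppe[0] >> 7) & 0x1)
+	 */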
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.bss_color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* HTC flags */
+ if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_HTC_HE)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ /*
+ * Initialize the PPE thresholds to "None" (7), as described in Table
+	 * 9-262ac of 802.11ax/D3.0.
+ */
+ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 ru_index_bitmap =
+ (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ppe_pos_bit = 7; /* Starting after PPE header */
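+		/* (the header is 3 NSS bits plus a 4-bit RU index bitmap) */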
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
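+		/*
+		 * Walking the RU index bitmap: the pre-shift by one combined
+		 * with the shift-right at the top of the bw loop means bit
+		 * "bw" of ru_index_bitmap is tested on each iteration, so
+		 * only RU allocations advertised by the peer are parsed.
+		 */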
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+ ru_index_tmp >>= 1;
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+ rcu_read_unlock();
+
+	/* Mark MU EDCA as enabled, unless it wasn't detected on some AC */
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+ for (i = 0; i < AC_NUM; i++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mvmvif->queue_params[i].mu_edca_param_rec;
+
+ if (!mvmvif->queue_params[i].mu_edca) {
+ flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+ break;
+ }
+
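+		/*
+		 * The MU EDCA parameter record packs ECWmin into the low
+		 * nibble of ecw_min_max and ECWmax into the high nibble;
+		 * both are exponents (CW = 2^ECW - 1).
+		 */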
+ sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ cpu_to_le16(mu_edca->aifsn);
+ sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+
+ if (vif->bss_conf.multi_sta_back_32bit)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (vif->bss_conf.ack_enabled)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
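+		/*
+		 * uora_ocw_range packs EOCWmin into bits 0-2 and EOCWmax
+		 * into bits 3-5, as in the UORA Parameter Set element.
+		 */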
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* TODO: support Multi BSSID IE */
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+ DATA_PATH_GROUP, 0),
+ 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ IWL_ERR(mvm, "Failed to config FW to work HE!\n");
+}
+
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the station interface was
+ * added.
+ */
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
+
+ /*
+ * If we're not associated yet, take the (new) BSSID before associating
+ * so the firmware knows. If we're already associated, then use the old
+ * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
+ * branch for disassociation below.
+ */
+ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
+ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ /* after sending it once, adopt mac80211 data */
+ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
+ mvmvif->associated = bss_conf->assoc;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ memset(&mvmvif->beacon_stats, 0,
+ sizeof(mvmvif->beacon_stats));
+
+ /* add quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
+ if (ret) {
+ IWL_ERR(mvm, "failed to update quotas\n");
+ return;
+ }
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * If we're restarting then the firmware will
+ * obviously have lost synchronisation with
+ * the AP. It will attempt to synchronise by
+ * itself, but we can make it more reliable by
+ * scheduling a session protection time event.
+ *
+ * The firmware needs to receive a beacon to
+				 * catch up with synchronisation; use 110% of
+ * the beacon interval.
+ *
+ * Set a large maximum delay to allow for more
+ * than a single interface.
+ */
+				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+
+				iwl_mvm_protect_session(mvm, vif, dur, dur,
+							5 * dur, false);
+ }
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC);
+ }
+ } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+ /*
+ * If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
+ */
+ WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+ "Failed to update SF upon disassociation\n");
+
+ /*
+ * If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ /*
+ * Remove AP station now that
+ * the MAC is unassoc
+ */
+ ret = iwl_mvm_rm_sta_id(mvm, vif,
+ mvmvif->ap_sta_id);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id =
+ IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+ }
+
+ /* remove quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to update quotas\n");
+
+ if (vif->p2p)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
+
+ /* this will take the cleared BSSID from bss_conf */
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update MAC %pM (clear after unassoc)\n",
+ vif->addr);
+ }
+
+ /*
+ * The firmware tracks the MU-MIMO group on its own.
+ * However, on HW restart we should restore this data.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+ ret = iwl_mvm_update_mu_groups(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update VHT MU_MIMO groups\n");
+ }
+
+ iwl_mvm_recalc_multicast(mvm);
+ iwl_mvm_configure_bcast_filter(mvm);
+
+ /* reset rssi values */
+ mvmvif->bf_data.ave_beacon_signal = 0;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+ IEEE80211_SMPS_AUTOMATIC);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INFO) {
+ /*
+ * We received a beacon from the associated AP so
+ * remove the session protection.
+ */
+ iwl_mvm_stop_session_protection(mvm, vif);
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ }
+
+ if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+ /*
+ * Send power command on every beacon change,
+		       * because we may not have enabled beacon abort yet.
+ */
+ BSS_CHANGED_BEACON_INFO)) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+ }
+
+ if (changes & BSS_CHANGED_CQM) {
+ IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
+ /* reset cqm events tracking */
+ mvmvif->bf_data.last_cqm_event = 0;
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update CQM thresholds\n");
+ }
+ }
+
+ if (changes & BSS_CHANGED_ARP_FILTER) {
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
+ iwl_mvm_configure_bcast_filter(mvm);
+ }
+}
+
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ /*
+ * iwl_mvm_mac_ctxt_add() might read directly from the device
+ * (the system time), so make sure it is available.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Re-calculate the tsf id, as the master-slave relations depend on the
+ * beacon interval, which was not known when the AP interface was added.
+ */
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
+ mvmvif->ap_assoc_sta_count = 0;
+
+ /* Add the mac context */
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Perform the binding */
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove;
+
+ /*
+ * This is not very nice, but the simplest:
+	 * For older FWs, adding the mcast sta before the bcast station may
+	 * cause assert 0x2b00.
+	 * This is fixed in later FW, so make the order of addition depend on
+	 * the TLV.
+ */
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ } else {
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ }
+
+ /* must be set before quota calculations */
+ mvmvif->ap_ibss_active = true;
+
+	/* power update needs to be done before quotas */
+ iwl_mvm_power_update_mac(mvm);
+
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ goto out_quota_failed;
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ /* we don't support TDLS during DCM */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ goto out_unlock;
+
+out_quota_failed:
+ iwl_mvm_power_update_mac(mvm);
+ mvmvif->ap_ibss_active = false;
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
+ return ret;
+}
+
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
+ mutex_lock(&mvm->mutex);
+
+ /* Handle AP stop while in CSA */
+ if (rcu_access_pointer(mvm->csa_vif) == vif) {
+ iwl_mvm_remove_time_event(mvm, mvmvif,
+ &mvmvif->time_event_data);
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ mvmvif->csa_countdown = false;
+ }
+
+ if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ mvm->csa_tx_block_bcn_timeout = 0;
+ }
+
+ mvmvif->ap_ibss_active = false;
+ mvm->ap_last_beacon_gp2 = 0;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+ iwl_mvm_update_quotas(mvm, false, NULL);
+
+ /*
+ * This is not very nice, but the simplest:
+	 * For older FWs, removing the mcast sta before the bcast station may
+	 * cause assert 0x2b00.
+	 * This is fixed in later FW (which will stop beaconing when removing
+	 * the bcast station).
+	 * So make the order of removal depend on the TLV.
+ */
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+
+ iwl_mvm_power_update_mac(mvm);
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ /* Need to send a new beacon template to the FW */
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
+
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+ }
+}
+
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /*
+ * iwl_mvm_bss_info_changed_station() might call
+ * iwl_mvm_protect_session(), which reads directly from
+ * the device (the system time), so make sure it is available.
+ */
+ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
+}
+
+static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ if (hw_req->req.n_channels == 0 ||
+ hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a hw_scan when it's already stopped. This can
+ * happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_scan_completed() and userspace called
+	 * cancel scan before ieee80211_scan_work() could run.
+ * To handle that, simply return if the scan is not running.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
+}
+
+static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long txqs = 0, tids = 0;
+ int tid;
+
+ /*
+	 * If we have TVQM then the queue numbers we get are too high -
+	 * luckily we really shouldn't get here with that, because such
+	 * hardware should have firmware supporting buffer station offload.
+ */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ __set_bit(tid_data->txq_id, &txqs);
+
+ if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
+ continue;
+
+ __set_bit(tid, &tids);
+ }
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
+ ieee80211_sta_set_buffered(sta, tid, true);
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
+ /*
+ * The fw updates the STA to be asleep. Tx packets on the Tx
+ * queues to this station will not be transmitted. The fw will
+ * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+ */
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ break;
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
+ iwl_mvm_sta_modify_ps_wake(mvm, sta);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+}
+
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ __iwl_mvm_mac_sta_notify(hw, cmd, sta);
+}
+
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
+
+ if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ return;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+ if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvmsta->vif ||
+ mvmsta->vif->type != NL80211_IFTYPE_AP) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (mvmsta->sleeping != sleeping) {
+ mvmsta->sleeping = sleeping;
+ __iwl_mvm_mac_sta_notify(mvm->hw,
+ sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
+ sta);
+ ieee80211_sta_ps_transition(sta, sleeping);
+ }
+
+ if (sleeping) {
+ switch (notif->type) {
+ case IWL_MVM_PM_EVENT_AWAKE:
+ case IWL_MVM_PM_EVENT_ASLEEP:
+ break;
+ case IWL_MVM_PM_EVENT_UAPSD:
+ ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
+ break;
+ case IWL_MVM_PM_EVENT_PS_POLL:
+ ieee80211_sta_pspoll(sta);
+ break;
+ default:
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ /*
+ * This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ */
+ mutex_lock(&mvm->mutex);
+ if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+ ERR_PTR(-ENOENT));
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const u8 *bssid)
+{
+ int i;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_tcm_mac *mdata;
+
+ mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ mdata->opened_rx_ba_sessions = false;
+ }
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
+ return;
+
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
+ if (!vif->p2p &&
+ (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
+ if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+static void
+iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *peer_addr,
+ enum nl80211_tdls_operation action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_tdls *tdls_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
+ tdls_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ if (!(tdls_trig->action_bitmap & BIT(action)))
+ return;
+
+ if (tdls_trig->peer_mode &&
+ memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "TDLS event occurred, peer %pM, action %d",
+ peer_addr, action);
+}
+
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ struct iwl_mvm_tid_data *tid_data;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock_bh(&mvm_sta->lock);
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ tid_data = &mvm_sta->tid_data[i];
+
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC
+ * queues, so we should never get a second deferred
+ * frame for the RA/TID.
+ */
+ iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
+ ieee80211_free_txskb(mvm->hw, skb);
+ }
+ }
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
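+	/*
+	 * mac80211 only moves a station one step at a time along the
+	 * NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED ladder,
+	 * which is why only adjacent transitions are handled below.
+	 */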
+ /* this would be a mac80211 bug ... but don't crash */
+ if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+		return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+				&mvm->status) ? 0 : -EINVAL;
+
+ /*
+ * If we are in a STA removal flow and in DQA mode:
+ *
+ * This is after the sync_rcu part, so the queues have already been
+ * flushed. No more TXs on their way in mac80211's path, and no more in
+ * the queues.
+ * Also, we won't be getting any new TX frames for this station.
+ * What we might have are deferred TX frames that need to be taken care
+ * of.
+ *
+ * Drop any still-queued deferred-frame before removing the STA, and
+ * make sure the worker is no longer handling frames for this STA.
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * No need to make sure deferred TX indication is off since the
+ * worker will already remove it if it was on
+ */
+ }
+
+ mutex_lock(&mvm->mutex);
+ /* track whether or not the station is associated */
+ mvm_sta->sta_state = new_state;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ /*
+ * Firmware bug - it'll crash if the beacon interval is less
+ * than 16. We can't avoid connecting at all, so refuse the
+		 * station state change; this will cause mac80211 to abandon
+ * attempts to connect to this AP, and eventually wpa_s will
+ * blacklist the AP...
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.beacon_int < 16) {
+ IWL_ERR(mvm,
+ "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+ sta->addr, vif->bss_conf.beacon_int);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (sta->tdls &&
+ (vif->p2p ||
+ iwl_mvm_tdls_sta_count(mvm, NULL) ==
+ IWL_MVM_TDLS_STA_COUNT ||
+ iwl_mvm_phy_ctx_count(mvm) > 1)) {
+ IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_add_sta(mvm, vif, sta);
+ if (sta->tdls && ret == 0) {
+ iwl_mvm_recalc_tdls_state(mvm, vif, true);
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_SETUP);
+ }
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ /*
+ * EBS may be disabled due to previous failures reported by FW.
+		 * Reset the EBS status here, assuming the environment has
+		 * changed.
+ */
+ mvm->last_ebs_successful = true;
+ iwl_mvm_check_uapsd(mvm, vif, sta->addr);
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mvmvif->ap_assoc_sta_count++;
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
+
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ false);
+ ret = iwl_mvm_update_sta(mvm, vif, sta);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+
+ /* we don't support TDLS during DCM */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ if (sta->tdls)
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_ENABLE_LINK);
+
+ /* enable beacon filtering */
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
+
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ /* disable beacon filtering */
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mvmvif->ap_assoc_sta_count--;
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ ret = iwl_mvm_rm_sta(mvm, vif, sta);
+ if (sta->tdls) {
+ iwl_mvm_recalc_tdls_state(mvm, vif, false);
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_DISABLE_LINK);
+ }
+ } else {
+ ret = -EIO;
+ }
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ if (sta->tdls && ret == 0) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mvm->rts_threshold = value;
+
+ return 0;
+}
+
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ changed & IEEE80211_RC_NSS_CHANGED)
+ iwl_mvm_sf_update(mvm, vif, false);
+}
+
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->queue_params[ac] = *params;
+
+ /*
+	 * No need to update right away, we'll get BSS_CHANGED_QOS.
+	 * The exception is the P2P_DEVICE interface, which needs an
+	 * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+ return 0;
+}
+
+static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 req_duration)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+ u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
+
+ /*
+ * iwl_mvm_protect_session() reads directly from the device
+ * (the system time), so make sure it is available.
+ */
+ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
+ return;
+
+ if (req_duration > duration)
+ duration = req_duration;
+
+ mutex_lock(&mvm->mutex);
+ /* Try really hard to protect the session and hear a beacon */
+ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!vif->bss_conf.idle) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a sched_scan when it's already stopped. This
+ * can happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_sched_scan_stopped() and userspace called
+	 * stop sched scan before ieee80211_sched_scan_stopped_work()
+ * could run. To handle this, simply return if the scan is
+ * not running.
+ */
+ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
+ mutex_unlock(&mvm->mutex);
+ return 0;
+ }
+
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_wait_for_async_handlers(mvm);
+
+ return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+ int keyidx = key->keyidx;
+ int ret;
+ u8 key_offset;
+
+ if (iwlwifi_mod_params.swcrypto) {
+ IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* For non-client mode, only use WEP keys for TX as we probably
+ * don't have a station yet anyway and would then have to keep
+ * track of the keys, linking them to each of the clients/peers
+		 * as they appear. For now, don't do that: WEP offload doesn't
+		 * really matter much for performance, but we need it for some
+ * other offload features in client mode.
+ */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+ break;
+ default:
+ /* currently FW supports only one optional cipher scheme */
+ if (hw->n_cipher_schemes &&
+ hw->cipher_schemes->cipher == key->cipher)
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ switch (cmd) {
+ case SET_KEY:
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta) {
+ /*
+ * GTK on AP interface is a TX-only key, return 0;
+ * on IBSS they're per-station and because we're lazy
+ * we don't support them for RX, so do the same.
+ * CMAC/GMAC in AP/IBSS modes must be done in software.
+ */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
+
+ if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
+ !iwl_mvm_has_new_tx_api(mvm)) {
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ break;
+ }
+ }
+
+ /* During FW restart, in order to restore the state as it was,
+ * don't try to reprogram keys we previously failed for.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ IWL_DEBUG_MAC80211(mvm,
+ "skip invalid idx key programming during restart\n");
+ ret = 0;
+ break;
+ }
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ sta && iwl_mvm_has_new_rx_api(mvm) &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ struct ieee80211_key_seq seq;
+ int tid, q;
+
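+			/*
+			 * With multi-queue RX the PN is checked per queue, so
+			 * seed every queue's per-TID PN with the current RX
+			 * sequence to keep replay detection consistent across
+			 * queues.
+			 */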
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
+ ptk_pn = kzalloc(struct_size(ptk_pn, q,
+ mvm->trans->num_rx_queues),
+ GFP_KERNEL);
+ if (!ptk_pn) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+ for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ memcpy(ptk_pn->q[q].pn[tid],
+ seq.ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+
+ rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
+ }
+
+ /* in HW restart reuse the index, otherwise request a new one */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ key_offset = key->hw_key_idx;
+ else
+ key_offset = STA_KEY_IDX_INVALID;
+
+ IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
+ if (ret) {
+ IWL_WARN(mvm, "set key failed\n");
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ ret = 0;
+ }
+
+ break;
+ case DISABLE_KEY:
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ ret = 0;
+ break;
+ }
+
+ if (sta && iwl_mvm_has_new_rx_api(mvm) &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ ptk_pn = rcu_dereference_protected(
+ mvmsta->ptk_pn[keyidx],
+ lockdep_is_held(&mvm->mutex));
+ RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
+ if (ptk_pn)
+ kfree_rcu(ptk_pn, rcu_head);
+ }
+
+ IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+ ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_hs20_roc_res *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mvm_time_event_data *te_data = data;
+
+ if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ IWL_DEBUG_TE(mvm,
+ "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
+ resp->status, resp->event_unique_id);
+
+ te_data->uid = le32_to_cpu(resp->event_unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return true;
+}
+
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration)
+{
+ int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
+ static const u16 time_event_response[] = { HOT_SPOT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ u32 dtim_interval = vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int;
+ u32 req_dur, delay;
+ struct iwl_hs20_roc_req aux_roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
+ .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
+ /* Set the channel info data */
+ .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
+ PHY_BAND_24 : PHY_BAND_5,
+ .channel_info.channel = channel->hw_value,
+ .channel_info.width = PHY_VHT_CHANNEL_MODE20,
+ /* Set the time and duration */
+ .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
+ };
+
+ delay = AUX_ROC_MIN_DELAY;
+ req_dur = MSEC_TO_TU(duration);
+
+ /*
+ * If we are associated we want the delay time to be at least one
+ * dtim interval so that the FW can wait until after the DTIM and
+	 * then start the time event; this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole dtim interval we would also
+ * like the delay to be for 2-3 dtim intervals, in case there are
+ * other time events with higher priority.
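+	 *
+	 * Worked example (hypothetical values): with beacon_int = 100 TU
+	 * and dtim_period = 2, dtim_interval = 200 TU, so the delay below
+	 * becomes min(3 * 200, MSEC_TO_TU(600)) = min(600, 585) = 585 TU.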
+ */
+ if (vif->bss_conf.assoc) {
+ delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= req_dur) {
+ req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (req_dur <= AUX_ROC_MIN_DURATION)
+ req_dur = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+
+ aux_roc_req.duration = cpu_to_le32(req_dur);
+ aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "ROC: Requesting to remain on channel %u for %ums\n",
+ channel->hw_value, req_dur);
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+ duration, delay, dtim_interval);
+
+ /* Set the node address */
+ memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+
+ if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+
+ te_data->vif = vif;
+ te_data->duration = duration;
+ te_data->id = HOT_SPOT_CMD;
+
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already be processed unsuccessfully.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_rx_aux_roc, te_data);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+ &aux_roc_req);
+
+ if (res) {
+ IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(res);
+
+ if (res) {
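+		/* the failure path above jumps into this cleanup block */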
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ return res;
+}
+
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct cfg80211_chan_def chandef;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ bool band_change_removal;
+ int ret, i;
+
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+ duration, type);
+
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
+ /* Use aux roc framework (HS20) */
+ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+ vif, duration);
+ goto out_unlock;
+ }
+ IWL_ERR(mvm, "hotspot not supported\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* handle below */
+ break;
+ default:
+ IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ phy_ctxt = &mvm->phy_ctxts[i];
+ if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
+ continue;
+
+ if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+ /*
+ * Unbind the P2P_DEVICE from the current PHY context,
+ * and if the PHY context is not used remove it.
+ */
+ ret = iwl_mvm_binding_remove_vif(mvm, vif);
+ if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+ /* Bind the P2P_DEVICE to the current PHY Context */
+ mvmvif->phy_ctxt = phy_ctxt;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+ goto schedule_time_event;
+ }
+ }
+
+ /* Need to update the PHY context only if the ROC channel changed */
+ if (channel == mvmvif->phy_ctxt->channel)
+ goto schedule_time_event;
+
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+ /*
+ * Check if the remain-on-channel is on a different band and that
+ * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+ * so, we'll need to release and then re-configure here, since we
+ * must not remove a PHY context that's part of a binding.
+ */
+ band_change_removal =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+ if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+ /*
+ * Change the PHY context configuration as it is currently
+ * referenced only by the P2P Device MAC (and we can modify it)
+ */
+ ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
+ &chandef, 1, 1);
+ if (ret)
+ goto out_unlock;
+ } else {
+ /*
+ * The PHY context is shared with other MACs (or we're trying to
+ * switch bands), so remove the P2P Device from the binding,
+		 * allocate a new PHY context and create a new binding.
+ */
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+ 1, 1);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to change PHY context\n");
+ goto out_unlock;
+ }
+
+ /* Unbind the P2P_DEVICE from the current PHY context */
+ ret = iwl_mvm_binding_remove_vif(mvm, vif);
+ if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+ /* Bind the P2P_DEVICE to the new allocated PHY context */
+ mvmvif->phy_ctxt = phy_ctxt;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ goto out_unlock;
+
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+ }
+
+schedule_time_event:
+ /* Schedule the time events */
+ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+ return ret;
+}
+
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_stop_roc(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+ return 0;
+}
+
+static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
+
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add PHY context\n");
+ goto out;
+ }
+
+ iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
+ *phy_ctxt_id = phy_ctxt->id;
+out:
+ return ret;
+}
+
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = __iwl_mvm_add_chanctx(mvm, ctx);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
+}
+
+static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_remove_chanctx(mvm, ctx);
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+ if (WARN_ONCE((phy_ctxt->ref > 1) &&
+ (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
+ "Cannot change PHY. Ref=%d, changed=0x%X\n",
+ phy_ctxt->ref, changed))
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ /* we are only changing the min_width, may be a noop */
+ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
+ if (phy_ctxt->width == ctx->min_def.width)
+ goto out_unlock;
+
+ /* we are just toggling between 20_NOHT and 20 */
+ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
+ ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+ goto out_unlock;
+ }
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->phy_ctxt = phy_ctxt;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /* only needed if we're switching chanctx (i.e. during CSA) */
+ if (switching_chanctx) {
+ mvmvif->ap_ibss_active = true;
+ break;
+ }
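+		/* fall through */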
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * The AP binding flow is handled as part of the start_ap flow
+ * (in bss_info_changed), similarly for IBSS.
+ */
+ ret = 0;
+ goto out;
+ case NL80211_IFTYPE_STATION:
+ mvmvif->csa_bcn_pending = false;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ /* always disable PS when a monitor interface is active */
+ mvmvif->ps_disabled = true;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out;
+
+ /*
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ */
+ iwl_mvm_power_update_mac(mvm);
+
+ /* Setting the quota at this stage is only required for monitor
+ * interfaces. For the other types, the bss_info changed flow
+ * will handle quota settings.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvmvif->monitor_active = true;
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ goto out_remove_binding;
+
+ ret = iwl_mvm_add_snif_sta(mvm, vif);
+ if (ret)
+ goto out_remove_binding;
+	}
+
+ /* Handle binding during CSA */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
+
+ if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
+ u32 duration = 3 * vif->bss_conf.beacon_int;
+
+ /* iwl_mvm_protect_session() reads directly from the
+ * device (the system time), so make sure it is
+ * available.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
+ if (ret)
+ goto out_remove_binding;
+
+ /* Protect the session to make sure we hear the first
+ * beacon on the new channel.
+ */
+ mvmvif->csa_bcn_pending = true;
+ iwl_mvm_protect_session(mvm, vif, duration, duration,
+ vif->bss_conf.beacon_int / 2,
+ true);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+
+ goto out;
+
+out_remove_binding:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
+out:
+ if (ret)
+ mvmvif->phy_ctxt = NULL;
+ return ret;
+}
+
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_vif *disabled_vif = NULL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ goto out;
+ case NL80211_IFTYPE_MONITOR:
+ mvmvif->monitor_active = false;
+ mvmvif->ps_disabled = false;
+ iwl_mvm_rm_snif_sta(mvm, vif);
+ break;
+ case NL80211_IFTYPE_AP:
+ /* This part is triggered only during CSA */
+ if (!switching_chanctx || !mvmvif->ap_ibss_active)
+ goto out;
+
+ mvmvif->csa_countdown = false;
+
+ /* Set CS bit on all the stations */
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
+
+ /* Save blocked iface, the timeout is set on the next beacon */
+ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
+
+ mvmvif->ap_ibss_active = false;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!switching_chanctx)
+ break;
+
+ disabled_vif = vif;
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
+ break;
+ default:
+ break;
+ }
+
+ iwl_mvm_update_quotas(mvm, false, disabled_vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+
+out:
+ mvmvif->phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+ __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
+
+ ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
+ true);
+ if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_remove;
+ }
+
+ /* we don't support TDLS during DCM - can be caused by channel switch */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ goto out;
+
+out_remove:
+ __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
+
+out_reassign:
+ if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
+ IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
+ goto out_restart;
+ }
+
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ goto out;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_mvm_nic_restart(mvm, false);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+
+ ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
+ true);
+ if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ goto out;
+
+out_reassign:
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ goto out;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_mvm_nic_restart(mvm, false);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+	/* we only support a single vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ return mvm->ibss_manager;
+}
+
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ bool set)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvm_sta || !mvm_sta->vif) {
+ IWL_ERR(mvm, "Station is not associated to a vif\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
+}
+
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+ [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+ [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+ int err;
+ u32 noa_duration;
+
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
+ NULL);
+ if (err)
+ return err;
+
+ if (!tb[IWL_MVM_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+ case IWL_MVM_TM_CMD_SET_NOA:
+ if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+ !vif->bss_conf.enable_beacon ||
+ !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+ return -EINVAL;
+
+ noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+ if (noa_duration >= vif->bss_conf.beacon_int)
+ return -EINVAL;
+
+ mvm->noa_duration = noa_duration;
+ mvm->noa_vif = vif;
+
+ return iwl_mvm_update_quotas(mvm, true, NULL);
+ case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+ /* must be associated client vif - ignore authorized */
+ if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+ !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+ return -EINVAL;
+
+ if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+ return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int err;
+
+ mutex_lock(&mvm->mutex);
+ err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+ mutex_unlock(&mvm->mutex);
+
+ return err;
+}
+#endif
+
+static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (which is when the absence time event starts).
+ */
+
+ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
+ "dummy channel switch op\n");
+}
+
+static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 apply_time;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->csa_failed = false;
+
+ IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_CHANNEL_SWITCH);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif =
+ rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+ "Another CSA is already in progress")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* we still didn't unblock tx. prevent new CS meanwhile */
+ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex))) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ rcu_assign_pointer(mvm->csa_vif, vif);
+
+ if (WARN_ONCE(mvmvif->csa_countdown,
+ "Previous CSA countdown didn't complete")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* Schedule the time event to a bit before beacon 1,
+ * to make sure we're in the new channel when the
+ * GO/AP arrives. In case count <= 1 immediately schedule the
+		 * TE (this might result in some packet loss or connection
+ * loss).
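+		 *
+		 * Worked example (hypothetical values): with beacon_int =
+		 * 100 TU and count = 3, apply_time = device_timestamp +
+		 * (100 * 2 - 10) * 1024, i.e. 190 TU after the device
+		 * timestamp (the device clock counts microseconds).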
+ */
+ if (chsw->count <= 1)
+ apply_time = 0;
+ else
+ apply_time = chsw->device_timestamp +
+ ((vif->bss_conf.beacon_int * (chsw->count - 1) -
+ IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
+
+ if (chsw->block_tx)
+ iwl_mvm_csa_client_absent(mvm, vif);
+
+ iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+ apply_time);
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mvmvif->ps_disabled = true;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+ if (ret)
+ goto out_unlock;
+
+ /* we won't be on this channel any longer */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvmvif->csa_failed) {
+ mvmvif->csa_failed = false;
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmvif->csa_bcn_pending = false;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+ int i;
+
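+	/*
+	 * Without the new TX API the flush operates on a bitmap of hardware
+	 * queues; with it, queues are allocated per station, so iterate the
+	 * known stations and flush (or wait on) each one instead.
+	 */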
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ if (drop) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_flush_tx_path(mvm,
+ iwl_mvm_flushable_queues(mvm) & queues, 0);
+ mutex_unlock(&mvm->mutex);
+ } else {
+ iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
+ }
+ return;
+ }
+
+ mutex_lock(&mvm->mutex);
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ if (drop)
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+ else
+ iwl_mvm_wait_sta_queues_empty(mvm,
+ iwl_mvm_sta_from_mac80211(sta));
+ }
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_sta *sta;
+ int i;
+ u32 msk = 0;
+
+ if (!vif) {
+ iwl_mvm_flush_no_vif(mvm, queues, drop);
+ return;
+ }
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ /* Make sure we're done with the deferred traffic before flushing */
+ flush_work(&mvm->add_stream_wk);
+
+ mutex_lock(&mvm->mutex);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* flush the AP-station and all TDLS peers */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+
+ /* make sure only TDLS peers or the AP are flushed */
+ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
+
+ if (drop) {
+ if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ msk |= mvmsta->tfd_queue_msk;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
+ }
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+ iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
+}
+
+static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ memset(survey, 0, sizeof(*survey));
+
+ /* only support global statistics right now */
+ if (idx != 0)
+ return -ENOENT;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ return -ENOENT;
+
+ mutex_lock(&mvm->mutex);
+
+ if (iwl_mvm_firmware_running(mvm)) {
+ ret = iwl_mvm_request_statistics(mvm, false);
+ if (ret)
+ goto out;
+ }
+
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_SCAN;
+ survey->time = mvm->accu_radio_stats.on_time_rf +
+ mvm->radio_stats.on_time_rf;
+ do_div(survey->time, USEC_PER_MSEC);
+
+ survey->time_rx = mvm->accu_radio_stats.rx_time +
+ mvm->radio_stats.rx_time;
+ do_div(survey->time_rx, USEC_PER_MSEC);
+
+ survey->time_tx = mvm->accu_radio_stats.tx_time +
+ mvm->radio_stats.tx_time;
+ do_div(survey->time_tx, USEC_PER_MSEC);
+
+ survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+ mvm->radio_stats.on_time_scan;
+ do_div(survey->time_scan, USEC_PER_MSEC);
+
+ ret = 0;
+ out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvmsta->avg_energy) {
+ sinfo->signal_avg = mvmsta->avg_energy;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
+	/* if beacon filtering isn't on, mac80211 does it anyway */
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ if (!vif->bss_conf.assoc)
+ return;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvmvif->ap_sta_id != mvmsta->sta_id)
+ goto unlock;
+
+ if (iwl_mvm_request_statistics(mvm, false))
+ goto unlock;
+
+ sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
+ mvmvif->beacon_stats.accu_num_beacons;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
+ if (mvmvif->beacon_stats.avg_signal) {
+ /* firmware only reports a value after RXing a few beacons */
+ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ }
+ unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
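+/*
+ * Count down the configured number of occurrences for this MLME event and
+ * collect the firmware debug data only once the counter reaches zero (a
+ * counter of zero means collect on every occurrence).
+ */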
+#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \
+ do { \
+ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \
+ break; \
+ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \
+ } while (0)
+
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+ trig_mlme = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ if (event->u.mlme.data == ASSOC_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(stop_assoc_denied,
+ "DENIED ASSOC: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(stop_assoc_timeout,
+ "ASSOC TIMEOUT");
+ } else if (event->u.mlme.data == AUTH_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(stop_auth_denied,
+ "DENIED AUTH: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(stop_auth_timeout,
+ "AUTH TIMEOUT");
+ } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
+ CHECK_MLME_TRIGGER(stop_rx_deauth,
+ "DEAUTH RX %d", event->u.mlme.reason);
+ } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
+ CHECK_MLME_TRIGGER(stop_tx_deauth,
+ "DEAUTH TX %d", event->u.mlme.reason);
+ }
+#undef CHECK_MLME_TRIGGER
+}
+
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "BAR received from %pM, tid %d, ssn %d",
+ event->u.ba.sta->addr, event->u.ba.tid,
+ event->u.ba.ssn);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (event->type) {
+ case MLME_EVENT:
+ iwl_mvm_event_mlme_callback(mvm, vif, event);
+ break;
+ case BAR_RX_EVENT:
+ iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+ break;
+ case BA_FRAME_TIMEOUT:
+ iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
+ event->u.ba.tid);
+ break;
+ default:
+ break;
+ }
+}
+
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
+{
+ u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ notif->cookie = mvm->queue_sync_cookie;
+
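+	/*
+	 * For a synchronous notification, arm the counter with one count per
+	 * RX queue; each queue's RX path decrements it when it handles the
+	 * notification and the last one wakes up the waiter below.
+	 */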
+ if (notif->sync)
+ atomic_set(&mvm->queue_sync_counter,
+ mvm->trans->num_rx_queues);
+
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ if (notif->sync) {
+ ret = wait_event_timeout(mvm->rx_sync_waitq,
+ atomic_read(&mvm->queue_sync_counter) == 0 ||
+ iwl_mvm_is_radio_killed(mvm),
+ HZ);
+ WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
+ }
+
+out:
+ atomic_set(&mvm->queue_sync_counter, 0);
+ mvm->queue_sync_cookie++;
+}
+
+static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_internal_rxq_notif data = {
+ .type = IWL_MVM_RXQ_EMPTY,
+ .sync = 1,
+ };
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
+ mutex_unlock(&mvm->mutex);
+}
+
+const struct ieee80211_ops iwl_mvm_hw_ops = {
+ .tx = iwl_mvm_mac_tx,
+ .ampdu_action = iwl_mvm_mac_ampdu_action,
+ .start = iwl_mvm_mac_start,
+ .reconfig_complete = iwl_mvm_mac_reconfig_complete,
+ .stop = iwl_mvm_mac_stop,
+ .add_interface = iwl_mvm_mac_add_interface,
+ .remove_interface = iwl_mvm_mac_remove_interface,
+ .config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
+ .configure_filter = iwl_mvm_configure_filter,
+ .config_iface_filter = iwl_mvm_config_iface_filter,
+ .bss_info_changed = iwl_mvm_bss_info_changed,
+ .hw_scan = iwl_mvm_mac_hw_scan,
+ .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
+ .sta_state = iwl_mvm_mac_sta_state,
+ .sta_notify = iwl_mvm_mac_sta_notify,
+ .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
+ .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .sta_rc_update = iwl_mvm_sta_rc_update,
+ .conf_tx = iwl_mvm_mac_conf_tx,
+ .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
+ .flush = iwl_mvm_mac_flush,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
+ .set_key = iwl_mvm_mac_set_key,
+ .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+ .remain_on_channel = iwl_mvm_roc,
+ .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+ .add_chanctx = iwl_mvm_add_chanctx,
+ .remove_chanctx = iwl_mvm_remove_chanctx,
+ .change_chanctx = iwl_mvm_change_chanctx,
+ .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+ .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
+
+ .start_ap = iwl_mvm_start_ap_ibss,
+ .stop_ap = iwl_mvm_stop_ap_ibss,
+ .join_ibss = iwl_mvm_start_ap_ibss,
+ .leave_ibss = iwl_mvm_stop_ap_ibss,
+
+ .tx_last_beacon = iwl_mvm_tx_last_beacon,
+
+ .set_tim = iwl_mvm_set_tim,
+
+ .channel_switch = iwl_mvm_channel_switch,
+ .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .post_channel_switch = iwl_mvm_post_channel_switch,
+
+ .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+ .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+ .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
+
+ .event_callback = iwl_mvm_mac_event_callback,
+
+ .sync_rx_queues = iwl_mvm_sync_rx_queues,
+
+ CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
+#ifdef CONFIG_PM_SLEEP
+ /* look at d3.c */
+ .suspend = iwl_mvm_suspend,
+ .resume = iwl_mvm_resume,
+ .set_wakeup = iwl_mvm_set_wakeup,
+ .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+ .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+ .get_survey = iwl_mvm_mac_get_survey,
+ .sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
new file mode 100644
index 000000000..6b65ad6c9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -0,0 +1,2002 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <linux/in6.h>
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "fw/notif-wait.h"
+#include "iwl-eeprom-parse.h"
+#include "fw/file.h"
+#include "iwl-config.h"
+#include "sta.h"
+#include "fw-api.h"
+#include "constants.h"
+#include "tof.h"
+#include "fw/runtime.h"
+#include "fw/dbg.h"
+#include "fw/acpi.h"
+#include "iwl-nvm-parse.h"
+
+#include <linux/average.h>
+
+#define IWL_MVM_MAX_ADDRESSES 5
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+/* A TimeUnit is 1024 microseconds */
+#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)
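+/* e.g. MSEC_TO_TU(100) = 100 * 1000 / 1024 = 97 TU (integer division) */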
+
+/* For GO, this value represents the number of TUs before CSA "beacon
+ * 0" TBTT when the CSA time-event needs to be scheduled to start. It
+ * must be big enough to ensure that we switch in time.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
+
+/* For client, this value represents the number of TUs before CSA
+ * "beacon 1" TBTT, instead. This is because we don't know when the
+ * GO/AP will be in the new channel, so we switch early enough.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
+
+/*
+ * This value (in TUs) is used to fine tune the CSA NoA end time which should
+ * be just before "beacon 0" TBTT.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
+
+/*
+ * Number of beacons to transmit on a new channel until we unblock tx to
+ * the stations, even if we didn't identify them on a new channel
+ */
+#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
+
+/* offchannel queue towards mac80211 */
+#define IWL_MVM_OFFCHANNEL_QUEUE 0
+
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
+
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
+ *	We will still register with mac80211 to have testmode working. The
+ *	NIC must not be brought up after the INIT fw asserted. This is useful
+ *	for debugging the INIT fw with proprietary tools over testmode.
+ * @tfd_q_hang_detect: enables the detection of hung transmit queues
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mvm_mod_params {
+ bool init_dbg;
+ bool tfd_q_hang_detect;
+ int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+ u16 id;
+ u16 color;
+ u32 ref;
+
+ enum nl80211_chan_width width;
+
+ /*
+ * TODO: This should probably be removed. Currently here only for rate
+ * scaling algorithm
+ */
+ struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+ struct ieee80211_vif *vif;
+ struct list_head list;
+ unsigned long end_jiffies;
+ u32 duration;
+ bool running;
+ u32 uid;
+
+ /*
+ * The access to the 'id' field must be done when the
+	 * mvm->time_event_lock is held, as its value is used to indicate
+	 * whether the te is in the time event list or not (it is not in the
+	 * list when id == TE_MAX)
+ */
+ u32 id;
+};
+
+/* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP: Low Power
+ */
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+ IWL_POWER_SCHEME_LP
+};
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+enum iwl_dbgfs_pm_mask {
+ MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
+ MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
+ MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
+ MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
+ MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
+ MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
+ MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+ MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+ MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
+ MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
+};
+
+struct iwl_dbgfs_pm {
+ u16 keep_alive_seconds;
+ u32 rx_data_timeout;
+ u32 tx_data_timeout;
+ bool skip_over_dtim;
+ u8 skip_dtim_periods;
+ bool lprx_ena;
+ u32 lprx_rssi_threshold;
+ bool snooze_ena;
+ bool uapsd_misbehaving;
+ bool use_ps_poll;
+ int mask;
+};
+
+/* beacon filtering */
+
+enum iwl_dbgfs_bf_mask {
+ MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
+ MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
+ MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
+ MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+ MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+ MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+ MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+ MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+ MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+ MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+ MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
+};
+
+struct iwl_dbgfs_bf {
+ u32 bf_energy_delta;
+ u32 bf_roaming_energy_delta;
+ u32 bf_roaming_state;
+ u32 bf_temp_threshold;
+ u32 bf_temp_fast_filter;
+ u32 bf_temp_slow_filter;
+ u32 bf_enable_beacon_filter;
+ u32 bf_debug_flag;
+ u32 bf_escape_timer;
+ u32 ba_escape_timer;
+ u32 ba_enable_beacon_abort;
+ int mask;
+};
+#endif
+
+enum iwl_mvm_smps_type_request {
+ IWL_MVM_SMPS_REQ_BT_COEX,
+ IWL_MVM_SMPS_REQ_TT,
+ IWL_MVM_SMPS_REQ_PROT,
+ NUM_IWL_MVM_SMPS_REQ,
+};
+
+enum iwl_mvm_ref_type {
+ IWL_MVM_REF_UCODE_DOWN,
+ IWL_MVM_REF_SCAN,
+ IWL_MVM_REF_ROC,
+ IWL_MVM_REF_ROC_AUX,
+ IWL_MVM_REF_P2P_CLIENT,
+ IWL_MVM_REF_AP_IBSS,
+ IWL_MVM_REF_USER,
+ IWL_MVM_REF_TX,
+ IWL_MVM_REF_TX_AGG,
+ IWL_MVM_REF_ADD_IF,
+ IWL_MVM_REF_START_AP,
+ IWL_MVM_REF_BSS_CHANGED,
+ IWL_MVM_REF_PREPARE_TX,
+ IWL_MVM_REF_PROTECT_TDLS,
+ IWL_MVM_REF_CHECK_CTKILL,
+ IWL_MVM_REF_PRPH_READ,
+ IWL_MVM_REF_PRPH_WRITE,
+ IWL_MVM_REF_NMI,
+ IWL_MVM_REF_TM_CMD,
+ IWL_MVM_REF_EXIT_WORK,
+ IWL_MVM_REF_PROTECT_CSA,
+ IWL_MVM_REF_FW_DBG_COLLECT,
+ IWL_MVM_REF_INIT_UCODE,
+ IWL_MVM_REF_SENDING_CMD,
+ IWL_MVM_REF_RX,
+
+ /* update debugfs.c when changing this */
+
+ IWL_MVM_REF_COUNT,
+};
+
+enum iwl_bt_force_ant_mode {
+ BT_FORCE_ANT_DIS = 0,
+ BT_FORCE_ANT_AUTO,
+ BT_FORCE_ANT_BT,
+ BT_FORCE_ANT_WIFI,
+
+ BT_FORCE_ANT_MAX,
+};
+
+/**
+ * enum iwl_mvm_low_latency_cause - low latency set causes
+ * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
+ * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
+ * @LOW_LATENCY_VCMD: low latency mode set from vendor command
+ */
+enum iwl_mvm_low_latency_cause {
+ LOW_LATENCY_TRAFFIC = BIT(0),
+ LOW_LATENCY_DEBUGFS = BIT(1),
+ LOW_LATENCY_VCMD = BIT(2),
+};
+
+/**
+ * struct iwl_mvm_vif_bf_data - beacon filtering related data
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicates if beacon abort is enabled
+ * @ave_beacon_signal: average beacon signal
+ * @last_cqm_event: rssi of the last cqm event
+ * @bt_coex_min_thold: minimum threshold for BT coex
+ * @bt_coex_max_thold: maximum threshold for BT coex
+ * @last_bt_coex_event: rssi of the last BT coex event
+ */
+struct iwl_mvm_vif_bf_data {
+ bool bf_enabled;
+ bool ba_enabled;
+ int ave_beacon_signal;
+ int last_cqm_event;
+ int bt_coex_min_thold;
+ int bt_coex_max_thold;
+ int last_bt_coex_event;
+};
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @bssid: BSSID for this (client) interface
+ * @associated: indicates that we're currently associated, used only for
+ * managing the firmware state in iwl_mvm_bss_info_changed_station()
+ * @ap_assoc_sta_count: count of stations associated to us - valid only
+ * if VIF type is AP
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ * should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
+ * @monitor_active: indicates that monitor context is configured, and that the
+ * interface should get quota etc.
+ * @low_latency: indicates low latency is set, see
+ * enum &iwl_mvm_low_latency_cause for causes.
+ * @ps_disabled: indicates that this interface requires PS to be disabled
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ * vifs: P2P_DEVICE, GO and AP.
+ * @beacon_skb: the skb used to hold the AP/GO beacon template
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ * combined on update to yield the overall request to mac80211.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ * # of received beacons accumulated over FW restart, and the current
+ * average signal of beacons retrieved from the firmware
+ * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
+ */
+struct iwl_mvm_vif {
+ struct iwl_mvm *mvm;
+ u16 id;
+ u16 color;
+ u8 ap_sta_id;
+
+ u8 bssid[ETH_ALEN];
+ bool associated;
+ u8 ap_assoc_sta_count;
+
+ u16 cab_queue;
+
+ bool uploaded;
+ bool ap_ibss_active;
+ bool pm_enabled;
+ bool monitor_active;
+ u8 low_latency;
+ bool ps_disabled;
+ struct iwl_mvm_vif_bf_data bf_data;
+
+ struct {
+ u32 num_beacons, accu_num_beacons;
+ u8 avg_signal;
+ } beacon_stats;
+
+ u32 ap_beacon_time;
+
+ enum iwl_tsf_id tsf_id;
+
+ /*
+ * QoS data from mac80211, need to store this here
+ * as mac80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct iwl_mvm_time_event_data time_event_data;
+ struct iwl_mvm_time_event_data hs_time_event_data;
+
+ struct iwl_mvm_int_sta bcast_sta;
+ struct iwl_mvm_int_sta mcast_sta;
+
+ /*
+ * Assigned while mac80211 has the interface in a channel context,
+ * or, for P2P Device, while it exists.
+ */
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
+ int tx_key_idx;
+
+ bool seqno_valid;
+ u16 seqno;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_slink;
+ struct iwl_dbgfs_pm dbgfs_pm;
+ struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
+ int dbgfs_quota_min;
+#endif
+
+ enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+
+ /* FW identified misbehaving AP */
+ u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+ struct delayed_work uapsd_nonagg_detected_wk;
+
+ /* Indicates that CSA countdown may be started */
+ bool csa_countdown;
+ bool csa_failed;
+ u16 csa_target_freq;
+
+ /* Indicates that we are waiting for a beacon on a new channel */
+ bool csa_bcn_pending;
+
+ /* TCP Checksum Offload */
+ netdev_features_t features;
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ if (!vif)
+ return NULL;
+ return (void *)vif->drv_priv;
+}
+
+extern const u8 tid_to_mac80211_ac[];
+
+#define IWL_MVM_SCAN_STOPPING_SHIFT 8
+
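+/*
+ * The STOPPING bits mirror the corresponding scan-type bits shifted left
+ * by IWL_MVM_SCAN_STOPPING_SHIFT, e.g. IWL_MVM_SCAN_STOPPING_REGULAR ==
+ * IWL_MVM_SCAN_REGULAR << IWL_MVM_SCAN_STOPPING_SHIFT.
+ */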
+enum iwl_scan_status {
+ IWL_MVM_SCAN_REGULAR = BIT(0),
+ IWL_MVM_SCAN_SCHED = BIT(1),
+ IWL_MVM_SCAN_NETDETECT = BIT(2),
+
+ IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
+ IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
+ IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+
+ IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
+ IWL_MVM_SCAN_STOPPING_REGULAR,
+ IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
+ IWL_MVM_SCAN_STOPPING_SCHED,
+ IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
+ IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+ IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+ IWL_MVM_SCAN_MASK = 0xff,
+};
+
+enum iwl_mvm_scan_type {
+ IWL_SCAN_TYPE_NOT_SET,
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_sched_scan_pass_all_states {
+ SCHED_SCAN_PASS_ALL_DISABLED,
+ SCHED_SCAN_PASS_ALL_ENABLED,
+ SCHED_SCAN_PASS_ALL_FOUND,
+};
+
+/**
+ * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
+ * @ct_kill_exit: worker to exit thermal kill
+ * @dynamic_smps: whether dynamic SMPS was enabled by thermal throttling
+ * @tx_backoff: the current thermal throttling tx backoff in uSec
+ * @min_backoff: the minimal tx backoff due to power restrictions
+ * @params: parameters to configure the thermal throttling algorithm
+ * @throttle: whether thermal throttling is active
+ */
+struct iwl_mvm_tt_mgmt {
+ struct delayed_work ct_kill_exit;
+ bool dynamic_smps;
+ u32 tx_backoff;
+ u32 min_backoff;
+ struct iwl_tt_params params;
+ bool throttle;
+};
+
+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @temp_trips: temperature thresholds for report
+ * @fw_trips_index: keep indexes into the original temp_trips array
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+ s16 temp_trips[IWL_MAX_DTS_TRIPS];
+ u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
+ struct thermal_zone_device *tzone;
+};
+
+/*
+ * struct iwl_mvm_cooling_device
+ * @cur_state: current state
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+ u32 cur_state;
+ struct thermal_cooling_device *cdev;
+};
+#endif
+
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+ u32 legacy_frames;
+ u32 ht_frames;
+ u32 vht_frames;
+ u32 bw_20_frames;
+ u32 bw_40_frames;
+ u32 bw_80_frames;
+ u32 bw_160_frames;
+ u32 sgi_frames;
+ u32 ngi_frames;
+ u32 siso_frames;
+ u32 mimo2_frames;
+ u32 agg_frames;
+ u32 ampdu_count;
+ u32 success_frames;
+ u32 fail_frames;
+ u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+ int last_frame_idx;
+};
+
+enum {
+ D0I3_DEFER_WAKEUP,
+ D0I3_PENDING_WAKEUP,
+};
+
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
+
+enum iwl_mvm_tdls_cs_state {
+ IWL_MVM_TDLS_SW_IDLE = 0,
+ IWL_MVM_TDLS_SW_REQ_SENT,
+ IWL_MVM_TDLS_SW_RESP_RCVD,
+ IWL_MVM_TDLS_SW_REQ_RCVD,
+ IWL_MVM_TDLS_SW_ACTIVE,
+};
+
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
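+/* EWMA of the data rate: 16 fractional bits, new samples weighted 1/16 */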
+DECLARE_EWMA(rate, 16, 16)
+
+struct iwl_mvm_tcm_mac {
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ } tx;
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ u32 last_ampdu_ref;
+ } rx;
+ struct {
+ /* track AP's transfer in client mode */
+ u64 rx_bytes;
+ struct ewma_rate rate;
+ bool detected;
+ } uapsd_nonagg_detect;
+ bool opened_rx_ba_sessions;
+};
+
+struct iwl_mvm_tcm {
+ struct delayed_work work;
+ spinlock_t lock; /* used when time elapsed */
+ unsigned long ts; /* timestamp when period ends */
+ unsigned long ll_ts;
+ unsigned long uapsd_nonagg_ts;
+ bool paused;
+ struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+ struct {
+ u32 elapsed; /* milliseconds for this TCM period */
+ u32 airtime[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
+ enum iwl_mvm_traffic_load global_load;
+ bool low_latency[NUM_MAC_INDEX_DRIVER];
+ bool change[NUM_MAC_INDEX_DRIVER];
+ bool global_change;
+ } result;
+};
+
+/**
+ * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @queue: queue of this reorder buffer
+ * @last_amsdu: track last A-MSDU SN for duplicate detection
+ * @last_sub_index: track A-MSDU sub-frame index for duplicate detection
+ * @reorder_timer: timer for frames that are in the reorder buffer. For an
+ * A-MSDU it is the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @lock: protect reorder buffer internal state
+ * @mvm: mvm pointer, needed for frame timer context
+ */
+struct iwl_mvm_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ u16 buf_size;
+ int queue;
+ u16 last_amsdu;
+ u8 last_sub_index;
+ struct timer_list reorder_timer;
+ bool removed;
+ bool valid;
+ spinlock_t lock;
+ struct iwl_mvm *mvm;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct _iwl_mvm_reorder_buf_entry {
+ struct sk_buff_head frames;
+ unsigned long reorder_time;
+};
+
+/* wrap in a struct so the alignment attribute below can be applied */
+struct iwl_mvm_reorder_buf_entry {
+ struct _iwl_mvm_reorder_buf_entry e;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
+#endif
+;
+
+/**
+ * struct iwl_mvm_baid_data - BA session data
+ * @rcu_head: RCU head for freeing this data under RCU
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue, this actually gets
+ * aligned up to avoid cache line sharing between queues
+ * @last_rx: last rx jiffies, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @rcu_ptr: pointer back to the baid_map entry, cleared when freeing
+ * @mvm: mvm pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: per-queue reorder buffer entries
+ */
+struct iwl_mvm_baid_data {
+ struct rcu_head rcu_head;
+ u8 sta_id;
+ u8 tid;
+ u8 baid;
+ u16 timeout;
+ u16 entries_per_queue;
+ unsigned long last_rx;
+ struct timer_list session_timer;
+ struct iwl_mvm_baid_data __rcu **rcu_ptr;
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mvm_reorder_buf_entry entries[];
+};
+
+static inline struct iwl_mvm_baid_data *
+iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
+{
+ return (void *)((u8 *)buf -
+ offsetof(struct iwl_mvm_baid_data, reorder_buf) -
+ sizeof(*buf) * buf->queue);
+}
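+
+/*
+ * Illustrative sketch: iwl_mvm_baid_entry() is a hypothetical helper (not
+ * a driver API) showing how the flat entries[] array is indexed; each RX
+ * queue owns entries_per_queue consecutive, cache-line-aligned slots.
+ */
+static inline struct iwl_mvm_reorder_buf_entry *
+iwl_mvm_baid_entry(struct iwl_mvm_baid_data *data, int queue, u16 index)
+{
+	return &data->entries[queue * data->entries_per_queue + index];
+}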
+
+/*
+ * enum iwl_mvm_queue_status - queue status
+ * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
+ * Basically, this means that this queue can be used for any purpose
+ * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
+ * This is the state of a queue that has been dedicated to some RA/TID
+ * (agg'd or not), but that hasn't yet gone through the actual enablement
+ * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
+ * Note that in this state there is no requirement to already know what TID
+ * should be used with this queue, it is just marked as a queue that will
+ * be used, and shouldn't be allocated to anyone else.
+ * @IWL_MVM_QUEUE_READY: queue is ready to be used
+ * This is the state of a queue that has been fully configured (including
+ * SCD pointers, etc), has a specific RA/TID assigned to it, and can be
+ * used to send traffic.
+ * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared
+ * This is a state in which a single queue serves more than one TID, all of
+ * which are not aggregated. Note that the queue is only associated to one
+ * RA.
+ * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
+ * This is a state of a queue that has had traffic on it, but during the
+ * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
+ * it. In this state, when a new queue is needed to be allocated but no
+ * such free queue exists, an inactive queue might be freed and given to
+ * the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ * This is the state of a queue that has had traffic pass through it, but
+ * needs to be reconfigured for some reason, e.g. the queue needs to
+ * become unshared and have aggregation re-enabled on it.
+ */
+enum iwl_mvm_queue_status {
+ IWL_MVM_QUEUE_FREE,
+ IWL_MVM_QUEUE_RESERVED,
+ IWL_MVM_QUEUE_READY,
+ IWL_MVM_QUEUE_SHARED,
+ IWL_MVM_QUEUE_INACTIVE,
+ IWL_MVM_QUEUE_RECONFIGURING,
+};
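+
+/*
+ * Illustrative lifecycle (derived from the states above): a queue moves
+ * FREE -> RESERVED when dedicated to an RA/TID, RESERVED -> READY via
+ * iwl_mvm_enable_txq(), may then become SHARED or (after
+ * IWL_MVM_DQA_QUEUE_TIMEOUT without traffic) INACTIVE, and returns to
+ * FREE once disabled.
+ */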
+
+#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
+#define IWL_MVM_INVALID_QUEUE 0xFFFF
+
+#define IWL_MVM_NUM_CIPHERS 10
+
+struct iwl_mvm_sar_profile {
+ bool enabled;
+ u8 table[ACPI_SAR_TABLE_SIZE];
+};
+
+struct iwl_mvm_geo_profile {
+ u8 values[ACPI_GEO_TABLE_SIZE];
+};
+
+struct iwl_mvm {
+ /* for logger access */
+ struct device *dev;
+
+ struct iwl_trans *trans;
+ const struct iwl_fw *fw;
+ const struct iwl_cfg *cfg;
+ struct iwl_phy_db *phy_db;
+ struct ieee80211_hw *hw;
+
+ /* for protecting access to iwl_mvm */
+ struct mutex mutex;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct work_struct async_handlers_wk;
+
+ struct work_struct roc_done_wk;
+
+ unsigned long init_status;
+
+ unsigned long status;
+
+ u32 queue_sync_cookie;
+ atomic_t queue_sync_counter;
+ /*
+ * for beacon filtering -
+ * currently only one interface can be supported
+ */
+ struct iwl_mvm_vif *bf_allowed_vif;
+
+ bool hw_registered;
+ bool calibrating;
+ u32 error_event_table[2];
+ u32 log_event_table;
+ u32 umac_error_event_table;
+ bool support_umac_log;
+
+ u32 ampdu_ref;
+ bool ampdu_toggle;
+
+ struct iwl_notif_wait_data notif_wait;
+
+ union {
+ struct mvm_statistics_rx_v3 rx_stats_v3;
+ struct mvm_statistics_rx rx_stats;
+ };
+
+ struct {
+ u64 rx_time;
+ u64 tx_time;
+ u64 on_time_rf;
+ u64 on_time_scan;
+ } radio_stats, accu_radio_stats;
+
+ u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
+ struct {
+ u8 hw_queue_refcount;
+ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue */
+ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+ enum iwl_mvm_queue_status status;
+ } queue_info[IWL_MAX_HW_QUEUES];
+ spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+ struct work_struct add_stream_wk; /* To add streams to queues */
+
+ atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
+
+ const char *nvm_file_name;
+ struct iwl_nvm_data *nvm_data;
+ /* NVM sections */
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
+
+ struct iwl_fw_runtime fwrt;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+ /* data related to data path */
+ struct iwl_rx_phy_info last_phy_info;
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+ unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+ u8 rx_ba_sessions;
+
+ /* configured by mac80211 */
+ u32 rts_threshold;
+
+ /* Scan status, cmd (pre-allocated) and auxiliary station */
+ unsigned int scan_status;
+ void *scan_cmd;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* For CDB this is low band scan type, for non-CDB - type. */
+ enum iwl_mvm_scan_type scan_type;
+ enum iwl_mvm_scan_type hb_scan_type;
+
+ enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+ struct delayed_work scan_timeout_dwork;
+
+ /* max number of simultaneous scans the FW supports */
+ unsigned int max_scans;
+
+ /* UMAC scan tracking */
+ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+
+ /* start time of last scan in TSF of the mac that requested the scan */
+ u64 scan_start;
+
+ /* the vif that requested the current scan */
+ struct iwl_mvm_vif *scan_vif;
+
+ /* rx chain antennas set through debugfs for the scan command */
+ u8 scan_rx_ant;
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* broadcast filters to configure for each associated station */
+ const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ bool override;
+ struct iwl_bcast_filter_cmd cmd;
+ } dbgfs_bcast_filtering;
+#endif
+#endif
+
+ /* Internal station */
+ struct iwl_mvm_int_sta aux_sta;
+ struct iwl_mvm_int_sta snif_sta;
+
+ bool last_ebs_successful;
+
+ u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+ u8 mgmt_last_antenna_idx;
+
+ /* last smart fifo state that was successfully sent to firmware */
+ enum iwl_sf_state sf_state;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ u32 dbgfs_prph_reg_addr;
+ bool disable_power_off;
+ bool disable_power_off_d3;
+
+ bool scan_iter_notif_enabled;
+
+ struct debugfs_blob_wrapper nvm_hw_blob;
+ struct debugfs_blob_wrapper nvm_sw_blob;
+ struct debugfs_blob_wrapper nvm_calib_blob;
+ struct debugfs_blob_wrapper nvm_prod_blob;
+ struct debugfs_blob_wrapper nvm_phy_sku_blob;
+
+ struct iwl_mvm_frame_stats drv_rx_stats;
+ spinlock_t drv_stats_lock;
+ u16 dbgfs_rx_phyinfo;
+#endif
+
+ struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
+
+ struct list_head time_event_list;
+ spinlock_t time_event_lock;
+
+ /*
+ * A bitmap indicating which key slots are in use. The firmware
+ * can hold 16 keys at most; the bitmap size reflects that limit.
+ */
+ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 fw_key_deleted[STA_KEY_MAX_NUM];
+
+ /* references taken by the driver and spinlock protecting them */
+ spinlock_t refs_lock;
+ u8 refs[IWL_MVM_REF_COUNT];
+
+ u8 vif_count;
+
+ /* -1 for always, 0 for never, >0 for that many times */
+ s8 fw_restart;
+
+#ifdef CONFIG_IWLWIFI_LEDS
+ struct led_classdev led;
+#endif
+
+ struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM
+ struct wiphy_wowlan_support wowlan;
+ int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+
+ /* sched scan settings for net detect */
+ struct ieee80211_scan_ies nd_ies;
+ struct cfg80211_match_set *nd_match_sets;
+ int n_nd_match_sets;
+ struct ieee80211_channel **nd_channels;
+ int n_nd_channels;
+ bool net_detect;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool d3_wake_sysassert;
+ bool d3_test_active;
+ bool store_d3_resume_sram;
+ void *d3_resume_sram;
+ u32 d3_test_pme_ptr;
+ struct ieee80211_vif *keep_vif;
+ u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
+#endif
+#endif
+
+ /* d0i3 */
+ u8 d0i3_ap_sta_id;
+ bool d0i3_offloading;
+ struct work_struct d0i3_exit_work;
+ struct sk_buff_head d0i3_tx;
+ /* protect d0i3_suspend_flags */
+ struct mutex d0i3_suspend_mutex;
+ unsigned long d0i3_suspend_flags;
+ /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+ spinlock_t d0i3_tx_lock;
+ wait_queue_head_t d0i3_exit_waitq;
+ wait_queue_head_t rx_sync_waitq;
+
+ /* BT-Coex */
+ struct iwl_bt_coex_profile_notif last_bt_notif;
+ struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+
+ u8 bt_tx_prio;
+ enum iwl_bt_force_ant_mode bt_force_ant_mode;
+
+ /* Aux ROC */
+ struct list_head aux_roc_te_list;
+
+ /* Thermal Throttling and CTkill */
+ struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+ struct iwl_mvm_thermal_device tz_device;
+ struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
+ s32 temperature; /* Celsius */
+ /*
+ * Debug option to set the NIC temperature. This option makes the
+ * driver think this is the actual NIC temperature, and ignore the
+ * real temperature that is received from the fw
+ */
+ bool temperature_test; /* Debug test temperature is enabled */
+
+ unsigned long bt_coex_last_tcm_ts;
+ struct iwl_mvm_tcm tcm;
+
+ u8 uapsd_noagg_bssid_write_idx;
+ struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
+ __aligned(2);
+
+ struct iwl_time_quota_cmd last_quota_cmd;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ u32 noa_duration;
+ struct ieee80211_vif *noa_vif;
+#endif
+
+ /* Tx queues */
+ u16 aux_queue;
+ u16 snif_queue;
+ u16 probe_queue;
+ u16 p2p_dev_queue;
+
+ /* Indicate if device power save is allowed */
+ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+ unsigned int max_amsdu_len; /* used for debugfs only */
+
+ struct ieee80211_vif __rcu *csa_vif;
+ struct ieee80211_vif __rcu *csa_tx_blocked_vif;
+ u8 csa_tx_block_bcn_timeout;
+
+ /* system time of last beacon (for AP/GO interface) */
+ u32 ap_last_beacon_gp2;
+
+ /* indicates that we transmitted the last beacon */
+ bool ibss_manager;
+
+ bool lar_regdom_set;
+ enum iwl_mcc_source mcc_src;
+
+ /* TDLS channel switch data */
+ struct {
+ struct delayed_work dwork;
+ enum iwl_mvm_tdls_cs_state state;
+
+ /*
+ * Current cs sta - might be different from periodic cs peer
+ * station. Value is meaningless when the cs-state is idle.
+ */
+ u8 cur_sta_id;
+
+ /* TDLS periodic channel-switch peer */
+ struct {
+ u8 sta_id;
+ u8 op_class;
+ bool initiator; /* are we the link initiator */
+ struct cfg80211_chan_def chandef;
+ struct sk_buff *skb; /* ch sw template */
+ u32 ch_sw_tm_ie;
+
+ /* timestamp of last ch-sw request sent (GP2 time) */
+ u32 sent_timestamp;
+ } peer;
+ } tdls_cs;
+
+
+ u32 ciphers[IWL_MVM_NUM_CIPHERS];
+ struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
+ struct iwl_mvm_tof_data tof_data;
+
+ struct ieee80211_vif *nan_vif;
+#define IWL_MAX_BAID 32
+ struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
+
+ /*
+ * Drop beacons from other APs in AP mode when there are no connected
+ * clients.
+ */
+ bool drop_bcn_ap_mode;
+
+ struct delayed_work cs_tx_unblock_dwork;
+
+ /* does a monitor vif exist (only one can exist hence bool) */
+ bool monitor_on;
+#ifdef CONFIG_ACPI
+ struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+#endif
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
+ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw) \
+ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
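+
+/*
+ * Illustrative usage: a mac80211 callback typically starts by recovering
+ * the MVM private data from the hardware pointer, e.g.:
+ *
+ *	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ */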
+
+/**
+ * enum iwl_mvm_status - MVM status bits
+ * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
+ * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
+ * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
+ * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
+ * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
+ * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
+ * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
+ * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ */
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_HW_CTKILL,
+ IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+ IWL_MVM_STATUS_IN_D0I3,
+ IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ IWL_MVM_STATUS_D3_RECONFIG,
+ IWL_MVM_STATUS_FIRMWARE_RUNNING,
+ IWL_MVM_STATUS_NEED_FLUSH_P2P,
+};
+
+/* Keep track of completed init configuration */
+enum iwl_mvm_init_status {
+ IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
+ IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
+ IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
+ IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
+};
+
+static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+}
+
+/* Must be called with rcu_read_lock() held; the lock may only be
+ * released once the returned mvmsta is no longer needed.
+ */
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* This can happen if the station was just removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
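+
+/*
+ * Illustrative usage sketch: the RCU read lock must span both the lookup
+ * and every use of the returned station, e.g.:
+ *
+ *	rcu_read_lock();
+ *	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ *	if (mvmsta)
+ *		use(mvmsta);	-- use() is a stand-in, not a driver API
+ *	rcu_read_unlock();
+ */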
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station was just removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
+{
+ return !iwlwifi_mod_params.d0i3_disable &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
+}
+
+static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
+{
+ /* OCE should never be enabled for LMAC scan FWs */
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
+}
+
+static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
+{
+ /* For now we only use this mode to differentiate between
+ * slave transports, which handle D0i3 entry in suspend by
+ * themselves in conjunction with runtime PM D0i3. So, this
+ * function is used to check whether we need to do anything
+ * when entering suspend or if the transport layer has already
+ * done it.
+ */
+ return (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) &&
+ (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
+}
+
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+ bool nvm_lar = mvm->nvm_data->lar_enabled;
+ bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+ if (iwlwifi_mod_params.lar_disable)
+ return false;
+
+ /*
+ * Enable LAR only if it is supported by the FW (TLV) &&
+ * enabled in the NVM
+ */
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT)
+ return nvm_lar && tlv_lar;
+ else
+ return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
+}
+
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+ IWL_MVM_BT_COEX_RRC;
+}
+
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
+ !IWL_MVM_HW_CSUM_DISABLE;
+}
+
+static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
+ IWL_MVM_BT_COEX_MPLUT;
+}
+
+static inline
+bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) &&
+ !(iwlwifi_mod_params.uapsd_disable &
+ IWL_DISABLE_UAPSD_P2P_CLIENT);
+}
+
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
+}
+
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+ /* TODO - replace with TLV once defined */
+ return mvm->trans->cfg->use_tfh;
+}
+
+static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
+{
+ /* TODO - better define this */
+ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO:
+ * The issue of how to determine CDB APIs and usage is still not fully
+ * defined.
+ * There are separate compilations for CDB and non-CDB FW, but there
+ * may also be a runtime check.
+ * For now there is a TLV for checking the compilation mode, but a
+ * runtime check will also have to be added here - once defined.
+ */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO: should this be the same as iwl_mvm_is_cdb_supported()?
+ * but then there's a little bit of code in scan that won't make
+ * any sense...
+ */
+ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_RX_STATS);
+}
+
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+}
+
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
+}
+
+static inline struct agg_tx_status *
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
+ else
+ return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
+}
+
+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_THERMAL
+ /* these two TLVs are redundant, since the FW takes responsibility
+ * for CT-kill only after we send at least one temperature
+ * thresholds report command.
+ */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+#else /* CONFIG_THERMAL */
+ return false;
+#endif /* CONFIG_THERMAL */
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
+}
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+
+static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
+ enum ieee80211_ac_numbers ac)
+{
+ return iwl_mvm_has_new_tx_api(mvm) ?
+ iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+}
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+};
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+
+/* Utils */
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r);
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+ u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd,
+ u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
+ u16 len, const void *data,
+ u32 *status);
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc);
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+ u16 tids, u32 flags);
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+}
+
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+ flush_work(&mvm->async_handlers_wk);
+}
+
+/* Statistics */
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
+
+static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
+{
+ return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
+ mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
+ mvm->fw->valid_tx_ant;
+}
+
+static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
+{
+ return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
+ mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
+ mvm->fw->valid_rx_ant;
+}
+
+static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
+{
+ u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+ FW_PHY_CFG_RX_CHAIN);
+ u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+ phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+ valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+ return mvm->fw->phy_config & phy_config;
+}
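+
+/*
+ * Worked example (illustrative): if valid_tx_ant is 0x3 (antennas A+B)
+ * and valid_rx_ant is 0x7 (A+B+C), the mask built above keeps every bit
+ * of mvm->fw->phy_config except the TX/RX chain fields, which are
+ * intersected with the valid antenna masks.
+ */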
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+ const u8 *data, u32 count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ int queue);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM PHY */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+
+/* MAC (virtual interface) programming */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off, const u8 *bssid_override);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif);
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+/* Quota management */
+static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_quota_low_latency(mvm) ?
+ sizeof(struct iwl_time_quota_cmd) :
+ sizeof(struct iwl_time_quota_cmd_v1);
+}
+
+static inline struct iwl_time_quota_data
+*iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd,
+ int i)
+{
+ struct iwl_time_quota_data_v1 *quotas;
+
+ if (iwl_mvm_has_quota_low_latency(mvm))
+ return &cmd->quotas[i];
+
+ quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas;
+ return (struct iwl_time_quota_data *)&quotas[i];
+}
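+
+/*
+ * Illustrative usage, assuming the fw-api MAX_BINDINGS bound and the
+ * iwl_time_quota_data layout: the accessor hides the v1/v2 command
+ * difference, so callers can walk the quotas uniformly, e.g.:
+ *
+ *	for (i = 0; i < MAX_BINDINGS; i++) {
+ *		struct iwl_time_quota_data *data =
+ *			iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ *		data->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ *	}
+ */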
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
+ struct ieee80211_vif *disabled_vif);
+
+/* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+int iwl_mvm_scan_size(struct iwl_mvm *mvm);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+
+/* Scheduled scan */
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* UMAC scan */
+int iwl_mvm_config_scan(struct iwl_mvm *mvm);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#else
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+ struct dentry *dbgfs_dir)
+{
+ return 0;
+}
+static inline void
+iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+static inline void
+iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+void rs_update_last_rssi(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_rx_status *rx_status);
+
+/* power management */
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+#ifdef CONFIG_IWLWIFI_LEDS
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+}
+#endif
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
+extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM
+int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool host_awake,
+ u32 cmd_flags);
+void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status);
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#else
+static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool host_awake,
+ u32 cmd_flags)
+{
+ return 0;
+}
+
+static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status)
+{
+}
+
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ bool offload_ns,
+ u32 cmd_flags);
+
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
+
+/* BT Coex */
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum ieee80211_rssi_event_data);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum nl80211_band band);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac);
+
+/* beacon filtering */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd);
+#else
+static inline void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{}
+#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags);
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags);
+/* SMPS */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
+
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mvm_low_latency_cause cause);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+ * interfaces aren't active then they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+ return mvmvif->low_latency;
+}
+
+static inline
+void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
+ enum iwl_mvm_low_latency_cause cause)
+{
+ if (set)
+ mvmvif->low_latency |= cause;
+ else
+ mvmvif->low_latency &= ~cause;
+}
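+
+/*
+ * Illustrative usage: low_latency is a bitmask of causes, so each caller
+ * toggles only its own bit (cause names per the driver's
+ * iwl_mvm_low_latency_cause enum), e.g.:
+ *
+ *	iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_DEBUGFS);
+ *	...
+ *	iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_DEBUGFS);
+ */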
+
+/* hw scheduler queue config */
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout);
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout);
+
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u8 tid, u8 flags);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
+
+/* Return a bitmask with all the hw supported queues, except for the
+ * command queue, which can't be flushed.
+ */
+static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
+{
+ return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
+ ~BIT(IWL_MVM_DQA_CMD_QUEUE));
+}
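+
+/*
+ * Worked example (illustrative, assuming num_of_queues == 31 and the DQA
+ * command queue at index 0): BIT(31) - 1 == 0x7fffffff, and clearing
+ * BIT(0) leaves 0x7ffffffe, i.e. every HW queue except the command queue.
+ */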
+
+static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+{
+ iwl_fw_cancel_timestamp(&mvm->fwrt);
+ iwl_free_fw_paging(&mvm->fwrt);
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ iwl_fw_dump_conf_clear(&mvm->fwrt);
+ iwl_trans_stop_device(mvm->trans);
+}
+
+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
+/* Re-configure the SCD for a queue that has already been configured */
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn);
+
+/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
+
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool added_vif);
+
+/* TDLS */
+
+/*
+ * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
+ * This TID is marked as used vs the AP and all connected TDLS peers.
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool sta_added);
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size);
+void iwl_mvm_reorder_timer_expired(struct timer_list *t);
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool tdls, bool cmd_q);
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg);
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid);
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+#endif
+
+#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
new file mode 100644
index 000000000..875557ce0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+
+/* Default NVM size to read */
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)
+
+#define NVM_WRITE_OPCODE 1
+#define NVM_READ_OPCODE 0
+
+/* load nvm chunk response */
+enum {
+ READ_NVM_CHUNK_SUCCEED = 0,
+ READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
+/*
+ * prepare the NVM host command with the pointers to the NVM buffer
+ * and send it to the fw
+ */
+static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, const u8 *data)
+{
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_WRITE_OPCODE,
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .len = { sizeof(struct iwl_nvm_access_cmd), length },
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, data },
+ /* data may come from vmalloc, so use _DUP */
+ .dataflags = { 0, IWL_HCMD_DFL_DUP },
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_nvm_access_resp *nvm_resp;
+ int ret;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ /* Extract & check NVM write response */
+ nvm_resp = (void *)pkt->data;
+ if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
+ IWL_ERR(mvm,
+ "NVM access write command failed for section %u (status = 0x%x)\n",
+ section, le16_to_cpu(nvm_resp->status));
+ ret = -EIO;
+ }
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, u8 *data)
+{
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_READ_OPCODE,
+ };
+ struct iwl_nvm_access_resp *nvm_resp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, },
+ };
+ int ret, bytes_read, offset_read;
+ u8 *resp_data;
+
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ /* Extract NVM response */
+ nvm_resp = (void *)pkt->data;
+ ret = le16_to_cpu(nvm_resp->status);
+ bytes_read = le16_to_cpu(nvm_resp->length);
+ offset_read = le16_to_cpu(nvm_resp->offset);
+ resp_data = nvm_resp->data;
+ if (ret) {
+ if ((offset != 0) &&
+ (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+ /*
+ * meaning of NOT_VALID_ADDRESS:
+ * the driver tried to read a chunk from an address that is
+ * a multiple of 2K and got an error since that address is
+ * empty. meaning of (offset != 0): the driver already
+ * read valid data from another chunk, so this case
+ * is not an error.
+ */
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
+ offset);
+ ret = 0;
+ } else {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->cfg->name);
+ ret = -EIO;
+ }
+ goto exit;
+ }
+
+ if (offset_read != offset) {
+ IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+ offset_read);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Copy the chunk we read into the output buffer */
+ memcpy(data + offset, resp_data, bytes_read);
+ ret = bytes_read;
+
+exit:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
+ const u8 *data, u16 length)
+{
+ int offset = 0;
+
+ /* copy data in chunks of 2k (and remainder if any) */
+
+ while (offset < length) {
+ int chunk_size, ret;
+
+ chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
+ length - offset);
+
+ ret = iwl_nvm_write_chunk(mvm, section, offset,
+ chunk_size, data + offset);
+ if (ret < 0)
+ return ret;
+
+ offset += chunk_size;
+ }
+
+ return 0;
+}
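+
+/*
+ * Worked example (illustrative): a 5000 byte section is written as three
+ * chunks of 2048, 2048 and 904 bytes, since IWL_NVM_DEFAULT_CHUNK_SIZE
+ * is 2 KiB.
+ */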
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0, which is the EEPROM. Because EEPROM reading is not limited
+ * by the uCode, we need to manually check in this case that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as it can,
+ * without overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+ u8 *data, u32 size_read)
+{
+ u16 length, offset = 0;
+ int ret;
+
+ /* Set nvm section read length */
+ length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+
+ ret = length;
+
+ /* Read the NVM until exhausted (reading less than requested) */
+ while (ret == length) {
+ /* Make sure we don't overflow the EEPROM-sized buffer */
+ if ((size_read + offset + length) >
+ mvm->cfg->base_params->eeprom_size) {
+ IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
+ return -ENOBUFS;
+ }
+
+ ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+ if (ret < 0) {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
+ return ret;
+ }
+ offset += ret;
+ }
+
+ iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM section %d read completed\n", section);
+ return offset;
+}
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+ const __be16 *hw;
+ const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
+ bool lar_enabled;
+ int regulatory_type;
+
+ /* Checking for required sections */
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+ IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
+ return NULL;
+ }
+ } else {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+ else
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
+ /* SW and REGULATORY sections are mandatory */
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[regulatory_type].data) {
+ IWL_ERR(mvm,
+ "Can't parse empty family 8000 OTP/NVM sections\n");
+ return NULL;
+ }
+ /* MAC_OVERRIDE or at least HW section must exist */
+ if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+ IWL_ERR(mvm,
+ "Can't parse mac_address, empty sections\n");
+ return NULL;
+ }
+
+ /* PHY_SKU section is mandatory in B0 */
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+ IWL_ERR(mvm,
+ "Can't parse phy_sku in B0, empty sections\n");
+ return NULL;
+ }
+ }
+
+ hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
+ sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+ calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ mac_override =
+ (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+ phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+ regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
+ lar_enabled = !iwlwifi_mod_params.lar_disable &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+ regulatory, mac_override, phy_sku,
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
+ lar_enabled);
+}
+
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+ int i, ret = 0;
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
+
+ for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+ if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+ continue;
+ ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+ sections[i].length);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+ int ret, section;
+ u32 size_read = 0;
+ u8 *nvm_buffer, *temp;
+ const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
+
+ if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
+
+ /* Read NVM values from the NIC */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
+ size_read);
+ if (ret < 0)
+ continue;
+ size_read += ret;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
+
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ switch (section) {
+ case NVM_SECTION_TYPE_SW:
+ mvm->nvm_sw_blob.data = temp;
+ mvm->nvm_sw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_CALIBRATION:
+ mvm->nvm_calib_blob.data = temp;
+ mvm->nvm_calib_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PRODUCTION:
+ mvm->nvm_prod_blob.data = temp;
+ mvm->nvm_prod_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PHY_SKU:
+ mvm->nvm_phy_sku_blob.data = temp;
+ mvm->nvm_phy_sku_blob.size = ret;
+ break;
+ default:
+ if (section == mvm->cfg->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
+ }
+#endif
+ }
+ if (!size_read)
+ IWL_ERR(mvm, "OTP is blank\n");
+ kfree(nvm_buffer);
+
+ /* Load an external NVM only if one was selected via the mod param */
+ if (mvm->nvm_file_name) {
+ /* read External NVM file from the mod param */
+ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
+ if (ret) {
+ mvm->nvm_file_name = nvm_file_C;
+
+ if ((ret == -EFAULT || ret == -ENOENT) &&
+ mvm->nvm_file_name) {
+ /* if reading the nvm file failed, retry with the default file */
+ ret = iwl_read_external_nvm(mvm->trans,
+ mvm->nvm_file_name,
+ mvm->nvm_sections);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ }
+ }
+
+ /* parse the relevant nvm sections */
+ mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+ if (!mvm->nvm_data)
+ return -ENODATA;
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
+ mvm->nvm_data->nvm_version);
+
+ return 0;
+}
+
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id)
+{
+ struct iwl_mcc_update_cmd mcc_update_cmd = {
+ .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+ .source_id = (u8)src_id,
+ };
+ struct iwl_mcc_update_resp *resp_cp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = MCC_UPDATE_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &mcc_update_cmd },
+ };
+
+ int ret;
+ u32 status;
+ int resp_len, n_channels;
+ u16 mcc;
+ bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+ if (!resp_v2)
+ cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
+
+ IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], src_id);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pkt = cmd.resp_pkt;
+
+ /* Extract MCC response */
+ if (resp_v2) {
+ struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp->n_channels);
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+ } else {
+ struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ resp_cp->status = mcc_resp_v1->status;
+ resp_cp->mcc = mcc_resp_v1->mcc;
+ resp_cp->cap = mcc_resp_v1->cap;
+ resp_cp->source_id = mcc_resp_v1->source_id;
+ resp_cp->n_channels = mcc_resp_v1->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v1->channels,
+ n_channels * sizeof(__le32));
+ }
+
+ status = le32_to_cpu(resp_cp->status);
+
+ mcc = le16_to_cpu(resp_cp->mcc);
+
+ /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+ if (mcc == 0) {
+ mcc = 0x3030; /* "00" - world */
+ resp_cp->mcc = cpu_to_le16(mcc);
+ }
+
+ IWL_DEBUG_LAR(mvm,
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+ status, mcc, mcc >> 8, mcc & 0xff, n_channels);
+
+exit:
+ iwl_free_resp(&cmd);
+ return resp_cp;
+}
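+
+/*
+ * Illustrative sketch (not part of the driver): the MCC is the two
+ * ASCII country-code characters packed into a 16-bit value, matching
+ * the cpu_to_le16(alpha2[0] << 8 | alpha2[1]) encoding above:
+ *
+ *	u16 mcc = 'U' << 8 | 'S';	-> 0x5553
+ *	mcc >> 8 == 'U', mcc & 0xff == 'S'
+ *
+ * and the W/A above maps the FW's 0x0000 answer to 0x3030 ("00",
+ * the world domain).
+ */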
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+ bool tlv_lar;
+ bool nvm_lar;
+ int retval;
+ struct ieee80211_regdomain *regd;
+ char mcc[3];
+
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
+ tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ nvm_lar = mvm->nvm_data->lar_enabled;
+ if (tlv_lar != nvm_lar)
+ IWL_INFO(mvm,
+ "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
+ tlv_lar ? "enabled" : "disabled",
+ nvm_lar ? "enabled" : "disabled");
+ }
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return 0;
+
+ /*
+ * try to replay the last set MCC to FW. If it doesn't exist,
+ * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+ */
+ retval = iwl_mvm_init_fw_regd(mvm);
+ if (retval != -ENOENT)
+ return retval;
+
+ /*
+ * Driver regulatory hint for initial update, this also informs the
+ * firmware we support wifi location updates.
+ * Disallow scans that might crash the FW while the LAR regdomain
+ * is not set.
+ */
+ mvm->lar_regdom_set = false;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+ !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ kfree(regd);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+ MCC_SOURCE_BIOS, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+ }
+
+ retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+ kfree(regd);
+ return retval;
+}
+
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+ enum iwl_mcc_source src;
+ char mcc[3];
+ struct ieee80211_regdomain *regd;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+ return;
+ }
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return;
+
+ mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+ mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+ mcc[2] = '\0';
+ src = notif->source_id;
+
+ IWL_DEBUG_LAR(mvm,
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+ mcc, src);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+ kfree(regd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000..6338d9cf7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -0,0 +1,255 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd *cmd)
+{
+ int i;
+
+ /*
+ * For QoS counters, we store the value to use next, so subtract 0x10:
+ * the uCode adds 0x10 *before* using the value, whereas the driver
+ * increments *after* using it (i.e. stores the next value to use).
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ cmd->qos_seq[i] = cpu_to_le16(seq);
+ }
+}
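+
+/*
+ * Illustrative sketch (not part of the driver): the 0x10 step is one
+ * 802.11 sequence number (the low four bits of the sequence control
+ * field are the fragment number). If the next value the driver would
+ * use is 0x0130 (SSN 19), the command carries 0x0120 (SSN 18); the
+ * uCode adds 0x10 back before use and thus also starts at SSN 19.
+ */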
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ bool offload_ns,
+ u32 cmd_flags)
+{
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
+ } cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = cmd_flags,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+ /*
+ * Skip tentative address when ns offload is enabled to avoid
+ * violating RFC4862.
+ * Keep tentative address when ns offload is disabled so the NS packets
+ * will not be filtered out and will wake up the host.
+ */
+ bool skip_tentative = offload_ns;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+ int num_skipped = 0;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ /*
+ * For each address we have (and that will fit) fill a target
+ * address struct and combine for NS offload structs with the
+ * solicited node addresses.
+ */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs)) {
+ num_skipped++;
+ continue;
+ }
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (mvmvif->num_target_ipv6_addrs - num_skipped)
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs =
+ cpu_to_le32(i - num_skipped);
+ else
+ cmd.v3l.num_valid_ipv6_addrs =
+ cpu_to_le32(i - num_skipped);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ bool found = false;
+
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs))
+ continue;
+
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+
+ found = true;
+ }
+ if (found) {
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+ } else {
+ bool found = false;
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs))
+ continue;
+
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+
+ found = true;
+ }
+
+ if (found) {
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+ }
+
+ if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
+ if (vif->bss_conf.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+ common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (!disable_offloading)
+ common->enabled = cpu_to_le32(enabled);
+
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
new file mode 100644
index 000000000..d93217161
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -0,0 +1,1732 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw/api/scan.h"
+#include "time-event.h"
+#include "fw-api.h"
+#include "fw/api/scan.h"
+#include "fw/acpi.h"
+
+#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+ .tfd_q_hang_detect = true
+ /* rest of fields are 0 by default */
+};
+
+module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
+MODULE_PARM_DESC(init_dbg,
+ "set to true to debug an ASSERT in INIT fw (default: false");
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
+ bool, 0444);
+MODULE_PARM_DESC(tfd_q_hang_detect,
+ "TFD queues hang detection (default: true");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+ int ret;
+
+ ret = iwl_mvm_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+ if (ret)
+ pr_err("Unable to register MVM op_mode: %d\n", ret);
+
+ return ret;
+}
+module_init(iwl_mvm_init);
+
+static void __exit iwl_mvm_exit(void)
+{
+ iwl_opmode_deregister("iwlmvm");
+ iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ u32 reg_val = 0;
+ u32 phy_config = iwl_mvm_get_phy_config(mvm);
+
+ radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
+ FW_PHY_CFG_RADIO_TYPE_POS;
+ radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
+ FW_PHY_CFG_RADIO_STEP_POS;
+ radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
+ FW_PHY_CFG_RADIO_DASH_POS;
+
+ /* SKU control */
+ reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+ reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+ ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+ /*
+ * TODO: Bits 7-8 of CSR in the 8000 HW family and higher set the ADC
+ * sampling, and shouldn't be set to any non-zero value.
+ * The same is supposed to be true of older HW, but unsetting them
+ * on some devices (such as the 7260) causes automatic tests to fail
+ * on seemingly unrelated errors. This needs further investigation,
+ * but for now we'll treat the cases separately.
+ */
+ if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+
+ iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+ reg_val);
+
+ IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+ radio_cfg_step, radio_cfg_dash);
+
+ /*
+ * W/A: NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted), causing the ME FW
+ * to lose ownership and be unable to obtain it back.
+ */
+ if (!mvm->trans->cfg->apmg_not_supported)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+/**
+ * enum iwl_rx_handler_context - context for Rx handler
+ * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
+ * which can't acquire mvm->mutex.
+ * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
+ * (and only in this case!), it should be set as ASYNC. In that case,
+ * it will be called from a worker with mvm->mutex held.
+ * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
+ * mutex itself, it will be called from a worker without mvm->mutex held.
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_UNLOCKED,
+};
+
+/**
+ * struct iwl_rx_handlers - handler for FW notification
+ * @cmd_id: command id
+ * @context: see &iwl_rx_handler_context
+ * @fn: the function called when the notification is received
+ */
+struct iwl_rx_handlers {
+ u16 cmd_id;
+ enum iwl_rx_handler_context context;
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+#define RX_HANDLER(_cmd_id, _fn, _context) \
+ { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
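+
+/*
+ * Illustrative expansion (not part of the driver): for example,
+ *
+ *	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+ *		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC)
+ *
+ * expands to
+ *
+ *	{ .cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_PM_NOTIF),
+ *	  .fn = iwl_mvm_sta_pm_notif, .context = RX_HANDLER_SYNC }
+ *
+ * where the wide id is what iwl_mvm_rx_common() matches against
+ * WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) for incoming packets.
+ */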
+
+/*
+ * Handlers for fw notifications
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, ...)
+ * This list should be in order of frequency for performance purposes.
+ *
+ * The handler can run in one of three contexts, see &iwl_rx_handler_context
+ */
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
+ iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+ RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
+ iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
+ RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(SCAN_ITERATION_COMPLETE,
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+ RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+ iwl_mvm_rx_lmac_scan_complete_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+ iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
+ RX_HANDLER_SYNC),
+
+ RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER_SYNC),
+
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
+ RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
+ RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
+ RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+ iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+ iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+ iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+ iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+ iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
+};
+#undef RX_HANDLER
+#undef RX_HANDLER_GRP
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
+ HCMD_NAME(MVM_ALIVE),
+ HCMD_NAME(REPLY_ERROR),
+ HCMD_NAME(ECHO_CMD),
+ HCMD_NAME(INIT_COMPLETE_NOTIF),
+ HCMD_NAME(PHY_CONTEXT_CMD),
+ HCMD_NAME(DBG_CFG),
+ HCMD_NAME(SCAN_CFG_CMD),
+ HCMD_NAME(SCAN_REQ_UMAC),
+ HCMD_NAME(SCAN_ABORT_UMAC),
+ HCMD_NAME(SCAN_COMPLETE_UMAC),
+ HCMD_NAME(TOF_CMD),
+ HCMD_NAME(TOF_NOTIFICATION),
+ HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
+ HCMD_NAME(ADD_STA_KEY),
+ HCMD_NAME(ADD_STA),
+ HCMD_NAME(REMOVE_STA),
+ HCMD_NAME(FW_GET_ITEM_CMD),
+ HCMD_NAME(TX_CMD),
+ HCMD_NAME(SCD_QUEUE_CFG),
+ HCMD_NAME(TXPATH_FLUSH),
+ HCMD_NAME(MGMT_MCAST_KEY),
+ HCMD_NAME(WEP_KEY),
+ HCMD_NAME(SHARED_MEM_CFG),
+ HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
+ HCMD_NAME(MAC_CONTEXT_CMD),
+ HCMD_NAME(TIME_EVENT_CMD),
+ HCMD_NAME(TIME_EVENT_NOTIFICATION),
+ HCMD_NAME(BINDING_CONTEXT_CMD),
+ HCMD_NAME(TIME_QUOTA_CMD),
+ HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
+ HCMD_NAME(LEDS_CMD),
+ HCMD_NAME(LQ_CMD),
+ HCMD_NAME(FW_PAGING_BLOCK_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
+ HCMD_NAME(HOT_SPOT_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
+ HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
+ HCMD_NAME(BT_COEX_CI),
+ HCMD_NAME(PHY_CONFIGURATION_CMD),
+ HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+ HCMD_NAME(PHY_DB_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
+ HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ HCMD_NAME(POWER_TABLE_CMD),
+ HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
+ HCMD_NAME(DC2DC_CONFIG_CMD),
+ HCMD_NAME(NVM_ACCESS_CMD),
+ HCMD_NAME(BEACON_NOTIFICATION),
+ HCMD_NAME(BEACON_TEMPLATE_CMD),
+ HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(BT_CONFIG),
+ HCMD_NAME(STATISTICS_CMD),
+ HCMD_NAME(STATISTICS_NOTIFICATION),
+ HCMD_NAME(EOSP_NOTIFICATION),
+ HCMD_NAME(REDUCE_TX_POWER_CMD),
+ HCMD_NAME(CARD_STATE_NOTIFICATION),
+ HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+ HCMD_NAME(TDLS_CONFIG_CMD),
+ HCMD_NAME(MAC_PM_POWER_TABLE),
+ HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
+ HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+ HCMD_NAME(RSS_CONFIG_CMD),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+ HCMD_NAME(REPLY_RX_PHY_CMD),
+ HCMD_NAME(REPLY_RX_MPDU_CMD),
+ HCMD_NAME(FRAME_RELEASE),
+ HCMD_NAME(BA_NOTIF),
+ HCMD_NAME(MCC_UPDATE_CMD),
+ HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+ HCMD_NAME(MARKER_CMD),
+ HCMD_NAME(BT_PROFILE_NOTIFICATION),
+ HCMD_NAME(BCAST_FILTER_CMD),
+ HCMD_NAME(MCAST_FILTER_CMD),
+ HCMD_NAME(REPLY_SF_CFG_CMD),
+ HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+ HCMD_NAME(D3_CONFIG_CMD),
+ HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+ HCMD_NAME(OFFLOADS_QUERY_CMD),
+ HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
+ HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_PATTERNS),
+ HCMD_NAME(WOWLAN_CONFIGURATION),
+ HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+ HCMD_NAME(WOWLAN_TKIP_PARAM),
+ HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+ HCMD_NAME(WOWLAN_GET_STATUSES),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE),
+ HCMD_NAME(D0I3_END_CMD),
+ HCMD_NAME(LTR_CONFIG),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
+ HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ HCMD_NAME(CTDP_CONFIG_CMD),
+ HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(GEO_TX_POWER_LIMIT),
+ HCMD_NAME(CT_KILL_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+ HCMD_NAME(DQA_ENABLE_CMD),
+ HCMD_NAME(UPDATE_MU_GROUPS_CMD),
+ HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(STA_PM_NOTIF),
+ HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+ HCMD_NAME(RX_QUEUES_NOTIFICATION),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+ HCMD_NAME(NVM_ACCESS_COMPLETE),
+ HCMD_NAME(NVM_GET_INFO),
+};
+
+static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
+ [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
+ [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+ [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+ [REGULATORY_AND_NVM_GROUP] =
+ HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+};
+
+/* these forward declarations avoid having to export the functions */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
+{
+ const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
+ u64 dflt_pwr_limit;
+
+ if (!backoff)
+ return 0;
+
+ dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+
+ while (backoff->pwr) {
+ if (dflt_pwr_limit >= backoff->pwr)
+ return backoff->backoff;
+
+ backoff++;
+ }
+
+ return 0;
+}
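+
+/*
+ * Illustrative sketch (not part of the driver), with a hypothetical
+ * backoff table: pwr_tx_backoffs is sorted by descending .pwr and
+ * terminated by a zero entry, e.g.
+ *
+ *	{ .pwr = 1600, .backoff = 0 },
+ *	{ .pwr = 1300, .backoff = 2 },
+ *	{ 0 }				(terminator)
+ *
+ * so an ACPI power limit of 1400 returns backoff 2 (the first entry
+ * whose .pwr it meets), while a limit below 1300 falls through to 0.
+ */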
+
+static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
+{
+ struct iwl_mvm *mvm =
+ container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
+ struct ieee80211_vif *tx_blocked_vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ mutex_lock(&mvm->mutex);
+
+ tx_blocked_vif =
+ rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex));
+
+ if (!tx_blocked_vif)
+ goto unlock;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_fwrt_dump_start(void *ctx)
+{
+ struct iwl_mvm *mvm = ctx;
+ int ret;
+
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ return 0;
+}
+
+static void iwl_mvm_fwrt_dump_end(void *ctx)
+{
+ struct iwl_mvm *mvm = ctx;
+
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+}
+
+static bool iwl_mvm_fwrt_fw_running(void *ctx)
+{
+ return iwl_mvm_firmware_running(ctx);
+}
+
+static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ .dump_start = iwl_mvm_fwrt_dump_start,
+ .dump_end = iwl_mvm_fwrt_dump_end,
+ .fw_running = iwl_mvm_fwrt_fw_running,
+};
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mvm *mvm;
+ struct iwl_trans_config trans_cfg = {};
+ static const u8 no_reclaim_cmds[] = {
+ TX_CMD,
+ };
+ int err, scan_size;
+ u32 min_backoff;
+
+ /*
+ * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+
+ /********************************
+ * 1. Allocating and configuring HW data
+ ********************************/
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mvm),
+ &iwl_mvm_hw_ops);
+ if (!hw)
+ return NULL;
+
+ if (cfg->max_rx_agg_size)
+ hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+
+ if (cfg->max_tx_agg_size)
+ hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+
+ op_mode = hw->priv;
+
+ mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ mvm->dev = trans->dev;
+ mvm->trans = trans;
+ mvm->cfg = cfg;
+ mvm->fw = fw;
+ mvm->hw = hw;
+
+ iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+ dbgfs_dir);
+
+ mvm->init_status = 0;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ op_mode->ops = &iwl_mvm_ops_mq;
+ trans->rx_mpdu_cmd_hdr_size =
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
+ } else {
+ op_mode->ops = &iwl_mvm_ops;
+ trans->rx_mpdu_cmd_hdr_size =
+ sizeof(struct iwl_rx_mpdu_res_start);
+
+ if (WARN_ON(trans->num_rx_queues > 1))
+ goto out_free;
+ }
+
+ mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
+
+ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
+ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+
+ mvm->sf_state = SF_UNINIT;
+ if (iwl_mvm_has_unified_ucode(mvm))
+ iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
+ else
+ iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
+ mvm->drop_bcn_ap_mode = true;
+
+ mutex_init(&mvm->mutex);
+ mutex_init(&mvm->d0i3_suspend_mutex);
+ spin_lock_init(&mvm->async_handlers_lock);
+ INIT_LIST_HEAD(&mvm->time_event_list);
+ INIT_LIST_HEAD(&mvm->aux_roc_te_list);
+ INIT_LIST_HEAD(&mvm->async_handlers_list);
+ spin_lock_init(&mvm->time_event_lock);
+ spin_lock_init(&mvm->queue_info_lock);
+
+ INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+ INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+ INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+ INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+ INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
+ INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+
+ spin_lock_init(&mvm->d0i3_tx_lock);
+ spin_lock_init(&mvm->refs_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
+ init_waitqueue_head(&mvm->rx_sync_waitq);
+
+ atomic_set(&mvm->queue_sync_counter, 0);
+
+ SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+ spin_lock_init(&mvm->tcm.lock);
+ INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ mvm->tcm.uapsd_nonagg_ts = jiffies;
+
+ INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ trans_cfg.op_mode = op_mode;
+ trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+ trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_DEF:
+ case IWL_AMSDU_4K:
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ break;
+ case IWL_AMSDU_8K:
+ trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+ break;
+ case IWL_AMSDU_12K:
+ trans_cfg.rx_buf_size = IWL_AMSDU_12K;
+ break;
+ default:
+ pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
+ iwlwifi_mod_params.amsdu_size);
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
+
+ /* the hardware splits the A-MSDU */
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+ /* TODO: remove when balanced power mode is fw supported */
+ iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+ } else if (mvm->cfg->mq_rx_supported) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
+
+ trans->wide_cmd_header = true;
+ trans_cfg.bc_table_dword =
+ mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
+
+ trans_cfg.command_groups = iwl_mvm_groups;
+ trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+
+ trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+ trans_cfg.scd_set_active = true;
+
+ trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
+
+ trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
+
+ /* Set a short watchdog for the command queue */
+ trans_cfg.cmd_q_wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
+
+ snprintf(mvm->hw->wiphy->fw_version,
+ sizeof(mvm->hw->wiphy->fw_version),
+ "%s", fw->fw_version);
+
+ /* Configure transport layer */
+ iwl_trans_configure(mvm->trans, &trans_cfg);
+
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
+ trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
+ memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+ sizeof(trans->dbg_conf_tlv));
+ trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+
+ trans->iml = mvm->fw->iml;
+ trans->iml_len = mvm->fw->iml_len;
+
+ /* set up notification wait support */
+ iwl_notification_wait_init(&mvm->notif_wait);
+
+ /* Init phy db */
+ mvm->phy_db = iwl_phy_db_init(trans);
+ if (!mvm->phy_db) {
+ IWL_ERR(mvm, "Cannot init phy_db\n");
+ goto out_free;
+ }
+
+ IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+ mvm->cfg->name, mvm->trans->hw_rev);
+
+ if (iwlwifi_mod_params.nvm_file)
+ mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+ else
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "working without external nvm file\n");
+
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
+ goto out_free;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ if (!iwlmvm_mod_params.init_dbg || !err)
+ iwl_mvm_stop_device(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+ mutex_unlock(&mvm->mutex);
+ if (err < 0) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
+ }
+
+ scan_size = iwl_mvm_scan_size(mvm);
+
+ mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+ if (!mvm->scan_cmd)
+ goto out_free;
+
+ /* Set EBS as successful as long as not stated otherwise by the FW. */
+ mvm->last_ebs_successful = true;
+
+ err = iwl_mvm_mac_setup_register(mvm);
+ if (err)
+ goto out_free;
+ mvm->hw_registered = true;
+
+ min_backoff = iwl_mvm_min_backoff(mvm);
+ iwl_mvm_thermal_initialize(mvm, min_backoff);
+
+ err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+ if (err)
+ goto out_unregister;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm))
+ memset(&mvm->rx_stats_v3, 0,
+ sizeof(struct mvm_statistics_rx_v3));
+ else
+ memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
+ /* The transport always starts with a taken reference; we can
+ * release it now if d0i3 is supported */
+ if (iwl_mvm_is_d0i3_supported(mvm))
+ iwl_trans_unref(mvm->trans);
+
+ iwl_mvm_tof_init(mvm);
+
+ return op_mode;
+
+ out_unregister:
+ if (iwlmvm_mod_params.init_dbg)
+ return op_mode;
+
+ ieee80211_unregister_hw(mvm->hw);
+ mvm->hw_registered = false;
+ iwl_mvm_leds_exit(mvm);
+ iwl_mvm_thermal_exit(mvm);
+ out_free:
+ iwl_fw_flush_dump(&mvm->fwrt);
+
+ if (iwlmvm_mod_params.init_dbg)
+ return op_mode;
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ iwl_trans_op_mode_leave(trans);
+
+ ieee80211_free_hw(mvm->hw);
+ return NULL;
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int i;
+
+ /* If d0i3 is supported, we have released the reference that
+ * the transport started with, so we should take it back now
+ * that we are leaving.
+ */
+ if (iwl_mvm_is_d0i3_supported(mvm))
+ iwl_trans_ref(mvm->trans);
+
+ iwl_mvm_leds_exit(mvm);
+
+ iwl_mvm_thermal_exit(mvm);
+
+ if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
+ ieee80211_unregister_hw(mvm->hw);
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+ }
+
+ kfree(mvm->scan_cmd);
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = NULL;
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
+ kfree(mvm->d3_resume_sram);
+#endif
+ iwl_trans_op_mode_leave(mvm->trans);
+
+ iwl_phy_db_free(mvm->phy_db);
+ mvm->phy_db = NULL;
+
+ kfree(mvm->nvm_data);
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+ kfree(mvm->nvm_sections[i].data);
+
+ cancel_delayed_work_sync(&mvm->tcm.work);
+
+ iwl_mvm_tof_clean(mvm);
+
+ mutex_destroy(&mvm->mutex);
+ mutex_destroy(&mvm->d0i3_suspend_mutex);
+
+ ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ enum iwl_rx_handler_context context;
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(local_list);
+
+ /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
+
+ /*
+ * Sync with the Rx path with a lock. Remove all the entries from this
+ * list, add them to a local one (lock-free), and then handle them.
+ */
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_splice_init(&mvm->async_handlers_list, &local_list);
+ spin_unlock_bh(&mvm->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ mutex_lock(&mvm->mutex);
+ entry->fn(mvm, &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ mutex_unlock(&mvm->mutex);
+ kfree(entry);
+ }
+}
+
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
+ cmds_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+ /* don't collect on CMD 0 */
+ if (!cmds_trig->cmds[i].cmd_id)
+ break;
+
+ if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+ cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "CMD 0x%02x.%02x received",
+ pkt->hdr.group_id, pkt->hdr.cmd);
+ break;
+ }
+}
+
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_rx_packet *pkt)
+{
+ int i;
+
+ iwl_mvm_rx_check_trigger(mvm, pkt);
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+ const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+ continue;
+
+ if (rx_h->context == RX_HANDLER_SYNC) {
+ rx_h->fn(mvm, rxb);
+ return;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return;
+
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+ entry->fn = rx_h->fn;
+ entry->context = rx_h->context;
+ spin_lock(&mvm->async_handlers_lock);
+ list_add_tail(&entry->list, &mvm->async_handlers_list);
+ spin_unlock(&mvm->async_handlers_lock);
+ schedule_work(&mvm->async_handlers_wk);
+ break;
+ }
+}
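+
+/*
+ * Illustrative summary (not part of the driver) of the dispatch above:
+ *
+ *	RX_HANDLER_SYNC           -> fn() runs here, in the Rx path
+ *	RX_HANDLER_ASYNC_LOCKED   -> queued; worker takes mvm->mutex
+ *	RX_HANDLER_ASYNC_UNLOCKED -> queued; fn() does its own locking
+ *
+ * The queued cases steal the rxb page so the notification data stays
+ * valid until iwl_mvm_async_handlers_wk() frees it.
+ */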
+
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+ else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
+ iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+ else
+ iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+ else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+ else
+ iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+ int q;
+
+ if (WARN_ON_ONCE(!mq))
+ return;
+
+ for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+ if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "mac80211 %d already stopped\n", q);
+ continue;
+ }
+
+ ieee80211_stop_queue(mvm->hw, q);
+ }
+}
+
+static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
+ const struct iwl_device_cmd *cmd)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /*
+ * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
+ * commands that need to block the Tx queues.
+ */
+ iwl_trans_block_txq_ptrs(mvm->trans, false);
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ unsigned long mq;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_mvm_stop_mac_queues(mvm, mq);
+}
+
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+ int q;
+
+ if (WARN_ON_ONCE(!mq))
+ return;
+
+ for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+ if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "mac80211 %d still stopped\n", q);
+ continue;
+ }
+
+ ieee80211_wake_queue(mvm->hw, q);
+ }
+}
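+
+/*
+ * Illustrative sketch (not part of the driver): several hw queues can
+ * map to one mac80211 queue, so stop/start are refcounted per queue:
+ *
+ *	stop:  count 0 -> 1	ieee80211_stop_queue()
+ *	stop:  count 1 -> 2	already stopped, skip
+ *	start: count 2 -> 1	still stopped, skip
+ *	start: count 1 -> 0	ieee80211_wake_queue()
+ */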
+
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ unsigned long mq;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_mvm_start_mac_queues(mvm, mq);
+}
+
+static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
+{
+ bool state = iwl_mvm_is_radio_killed(mvm);
+
+ if (state)
+ wake_up(&mvm->rx_sync_waitq);
+
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
+{
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+
+ iwl_mvm_set_rfkill_state(mvm);
+}
+
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ bool calibrating = READ_ONCE(mvm->calibrating);
+
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+ iwl_mvm_set_rfkill_state(mvm);
+
+ /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+ if (calibrating)
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * Stop the device if we run OPERATIONAL firmware or if we are in the
+ * middle of the calibrations.
+ */
+ return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
+struct iwl_mvm_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+{
+ struct iwl_mvm_reprobe *reprobe;
+
+ reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
+ put_device(reprobe->dev);
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
+{
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted scan when that was already done, which
+ * is not a problem. It is necessary to abort any OS scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211. If restart isn't called
+ * (no fw restart), scan status will stay busy.
+ */
+ iwl_mvm_report_scan_aborted(mvm);
+
+ /*
+ * If we're restarting already, don't cycle restarts.
+ * If INIT fw asserted, it will likely fail again.
+ * If WoWLAN fw asserted, don't restart either, mac80211
+ * can't recover this since we're already half suspended.
+ */
+ if (!mvm->fw_restart && fw_error) {
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ NULL);
+ } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_reprobe *reprobe;
+
+ IWL_ERR(mvm,
+ "Firmware error during reconfiguration - reprobe!\n");
+
+ /*
+ * get a module reference to avoid doing this while unloading
+ * and to avoid scheduling a work item with code that's being
+ * removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(mvm, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = get_device(mvm->trans->dev);
+ INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+ schedule_work(&reprobe->work);
+ } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
+ mvm->hw_registered) {
+ /* don't let the transport/FW power down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+ if (fw_error && mvm->fw_restart > 0)
+ mvm->fw_restart--;
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+ ieee80211_restart_hw(mvm->hw);
+ }
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ iwl_mvm_nic_restart(mvm, true);
+}
+
+static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ WARN_ON(1);
+ iwl_mvm_nic_restart(mvm, true);
+}
+
+struct iwl_d0i3_iter_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *connected_vif;
+ u8 ap_sta_id;
+ u8 vif_count;
+ u8 offloading_tid;
+ bool disable_offloading;
+};
+
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta;
+ u32 available_tids = 0;
+ u8 tid;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+ mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
+ return false;
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+ if (!mvmsta)
+ return false;
+
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ /*
+ * in case of pending tx packets, don't use this tid
+ * for offloading in order to prevent reuse of the same
+ * qos seq counters.
+ */
+ if (iwl_mvm_tid_queued(mvm, tid_data))
+ continue;
+
+ if (tid_data->state != IWL_AGG_OFF)
+ continue;
+
+ available_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ /*
+	 * disallow protocol offloading if we have no available tid
+	 * (i.e. none with no pending frames and no active aggregation);
+	 * we don't handle "holes" properly - the scheduler needs the
+	 * frame's seq number and TFD index to match
+ */
+ if (!available_tids)
+ return true;
+
+ /* for simplicity, just use the first available tid */
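+	/* ffs() is 1-based, so subtract one to get the lowest set bit */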
+ iter_data->offloading_tid = ffs(available_tids) - 1;
+ return false;
+}
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d0i3_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+ IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ /*
+ * in case of pending tx packets or active aggregations,
+ * avoid offloading features in order to prevent reuse of
+ * the same qos seq counters.
+ */
+ if (iwl_mvm_disallow_offloading(mvm, vif, data))
+ data->disable_offloading = true;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+ iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
+ false, flags);
+
+ /*
+ * on init/association, mvm already configures POWER_TABLE_CMD
+ * and REPLY_MCAST_FILTER_CMD, so currently don't
+ * reconfigure them (we might want to use different
+ * params later on, though).
+ */
+ data->ap_sta_id = mvmvif->ap_sta_id;
+ data->vif_count++;
+
+ /*
+ * no new commands can be sent at this stage, so it's safe
+ * to save the vif pointer during d0i3 entrance.
+ */
+ data->connected_vif = vif;
+}
+
+static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
+ struct iwl_wowlan_config_cmd *cmd,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
+ return;
+
+ rcu_read_lock();
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out;
+
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+ cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->offloading_tid = iter_data->offloading_tid;
+ cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
+ ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
+ /*
+ * The d0i3 uCode takes care of the nonqos counters,
+ * so configure only the qos seq ones.
+ */
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
+out:
+ rcu_read_unlock();
+}
+
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+ int ret;
+ struct iwl_d0i3_iter_data d0i3_iter_data = {
+ .mvm = mvm,
+ };
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE),
+ };
+ struct iwl_d3_manager_config d3_cfg_cmd = {
+ .min_sleep_time = cpu_to_le32(1000),
+ .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
+ };
+
+ IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+ if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
+ return -EINVAL;
+
+ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+
+ /*
+	 * iwl_mvm_ref_sync takes a reference before checking the flag,
+	 * so by verifying that no reference is held we prevent a race
+	 * in which iwl_mvm_ref_sync succeeds while we configure the
+	 * firmware to enter d0i3.
+ */
+ if (iwl_mvm_ref_taken(mvm)) {
+ IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ return 1;
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_enter_d0i3_iterator,
+ &d0i3_iter_data);
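+	/* offloading is only meaningful with a single associated station vif */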
+ if (d0i3_iter_data.vif_count == 1) {
+ mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+ } else {
+ WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+ mvm->d0i3_offloading = false;
+ }
+
+ iwl_mvm_pause_tcm(mvm, true);
+ /* make sure we have no running tx while configuring the seqno */
+ synchronize_net();
+
+ /* Flush the hw queues, in case something got queued during entry */
+ /* TODO new tx api */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+ } else {
+ ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ /* configure wowlan configuration only if needed */
+ if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
+ /* wake on beacons only if beacon storing isn't supported */
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING))
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+
+ iwl_mvm_wowlan_config_key_params(mvm,
+ d0i3_iter_data.connected_vif,
+ true, flags);
+
+ iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
+ &d0i3_iter_data);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+ flags | CMD_MAKE_TRANS_IDLE,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+ IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+struct iwl_mvm_d0i3_exit_work_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_wowlan_status *status;
+ u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 reasons = data->wakeup_reasons;
+
+ /* consider only the relevant station interface */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+ data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
+ return;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+ iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+ else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
+ ieee80211_beacon_loss(vif);
+ else
+ iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
+}
+
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ int i;
+ bool wake_queues = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->d0i3_tx_lock);
+
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
+ goto out;
+
+ IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+ /* get the sta in order to update seq numbers and re-enqueue skbs */
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (IS_ERR_OR_NULL(sta)) {
+ sta = NULL;
+ goto out;
+ }
+
+ if (mvm->d0i3_offloading && qos_seq) {
+ /* update qos seq numbers if offloading was enabled */
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = le16_to_cpu(qos_seq[i]);
+ /* firmware stores last-used one, we store next one */
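+			/* the SN sits above the 4 fragment bits, so +0x10 advances it by 1 */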
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+ }
+out:
+ /* re-enqueue (or drop) all packets */
+ while (!skb_queue_empty(&mvm->d0i3_tx)) {
+ struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+ if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+
+		/* we dequeued at least one frame, so the queues must be woken */
+ wake_queues = true;
+ }
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+ if (wake_queues)
+ ieee80211_wake_queues(mvm->hw);
+
+ spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+ struct iwl_host_cmd get_status_cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
+ };
+ struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
+ .mvm = mvm,
+ };
+
+ struct iwl_wowlan_status *status;
+ int ret;
+ u32 wakeup_reasons = 0;
+ __le16 *qos_seq = NULL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+ if (ret)
+ goto out;
+
+ status = (void *)get_status_cmd.resp_pkt->data;
+ wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+ qos_seq = status->qos_seq_ctr;
+
+ IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+ iter_data.wakeup_reasons = wakeup_reasons;
+ iter_data.status = status;
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d0i3_exit_work_iter,
+ &iter_data);
+out:
+ iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+
+ IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
+ wakeup_reasons);
+
+ /* qos_seq might point inside resp_pkt, so free it only now */
+ if (get_status_cmd.resp_pkt)
+ iwl_free_resp(&get_status_cmd);
+
+ /* the FW might have updated the regdomain */
+ iwl_mvm_update_changed_regdom(mvm);
+
+ iwl_mvm_resume_tcm(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
+ mutex_unlock(&mvm->mutex);
+}
+
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
+{
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
+ int ret;
+
+ IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+ if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
+ return -EINVAL;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+ IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+ __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (ret)
+ goto out;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_exit_d0i3_iterator,
+ mvm);
+out:
+ schedule_work(&mvm->d0i3_exit_work);
+ return ret;
+}
+
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+ return _iwl_mvm_exit_d0i3(mvm);
+}
+
+#define IWL_MVM_COMMON_OPS \
+ /* these could be differentiated */ \
+ .async_cb = iwl_mvm_async_cb, \
+ .queue_full = iwl_mvm_stop_sw_queue, \
+ .queue_not_full = iwl_mvm_wake_sw_queue, \
+ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
+ .free_skb = iwl_mvm_free_skb, \
+ .nic_error = iwl_mvm_nic_error, \
+ .cmd_queue_full = iwl_mvm_cmd_queue_full, \
+ .nic_config = iwl_mvm_nic_config, \
+ .enter_d0i3 = iwl_mvm_enter_d0i3, \
+ .exit_d0i3 = iwl_mvm_exit_d0i3, \
+ /* as we only register one, these MUST be common! */ \
+ .start = iwl_op_mode_mvm_start, \
+ .stop = iwl_op_mode_mvm_stop
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+ IWL_MVM_COMMON_OPS,
+ .rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb,
+ unsigned int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
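+	/* dispatch per-queue notifications; the common case is an RX MPDU */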
+ if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+ else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+ IWL_MVM_COMMON_OPS,
+ .rx = iwl_mvm_rx_mq,
+ .rx_rss = iwl_mvm_rx_mq_rss,
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 000000000..7f5434b34
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,326 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return PHY_VHT_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return PHY_VHT_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return PHY_VHT_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return PHY_VHT_CHANNEL_MODE160;
+ default:
+ WARN(1, "Invalid channel width=%u", chandef->width);
+ return PHY_VHT_CHANNEL_MODE20;
+ }
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
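+	/*
+	 * The center of the control (primary 20 MHz) channel is always
+	 * offset from the wide channel's center frequency by an odd
+	 * multiple of 10 MHz, hence the +-10/30/50/70 values below.
+	 */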
+ switch (chandef->chan->center_freq - chandef->center_freq1) {
+ case -70:
+ return PHY_VHT_CTRL_POS_4_BELOW;
+ case -50:
+ return PHY_VHT_CTRL_POS_3_BELOW;
+ case -30:
+ return PHY_VHT_CTRL_POS_2_BELOW;
+ case -10:
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ case 10:
+ return PHY_VHT_CTRL_POS_1_ABOVE;
+ case 30:
+ return PHY_VHT_CTRL_POS_2_ABOVE;
+ case 50:
+ return PHY_VHT_CTRL_POS_3_ABOVE;
+ case 70:
+ return PHY_VHT_CTRL_POS_4_ABOVE;
+ default:
+ WARN(1, "Invalid channel definition");
+ case 0:
+ /*
+ * The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return PHY_VHT_CTRL_POS_1_BELOW;
+ }
+}
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd *cmd,
+ u32 action, u32 apply_time)
+{
+ memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd->action = cpu_to_le32(action);
+ cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ u8 active_cnt, idle_cnt;
+
+ /* Set the channel info data */
+ cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+
+ cmd->ci.channel = chandef->chan->hw_value;
+ cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+	/* Set the Rx chains */
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+	/* In scenarios where we only ever use single-stream rates,
+ * i.e. legacy 11b/g/a associations, single-stream APs or even
+ * static SMPS, enable both chains to get diversity, improving
+ * the case where we're far enough from the AP that attenuation
+ * between the two antennas is sufficiently different to impact
+ * performance.
+ */
+ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+ idle_cnt = 2;
+ active_cnt = 2;
+ }
+
+ cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ PHY_RX_CHAIN_VALID_POS);
+ cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+ PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (unlikely(mvm->dbgfs_rx_phyinfo))
+ cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
+
+ cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only when something in the configuration has changed: either this is the
+ * first time the phy configuration is applied, or the phy configuration
+ * changed since the previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic,
+ u32 action, u32 apply_time)
+{
+ struct iwl_phy_context_cmd cmd;
+ int ret;
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ chains_static, chains_dynamic);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
+ sizeof(struct iwl_phy_context_cmd),
+ &cmd);
+ if (ret)
+ IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+ return ret;
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ ctxt->ref);
+ lockdep_assert_held(&mvm->mutex);
+
+ ctxt->channel = chandef->chan;
+
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Update the number of references to the given PHY context. This is valid only
+ * in case the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ lockdep_assert_held(&mvm->mutex);
+ ctxt->ref++;
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
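+	/*
+	 * Firmware with CDB support can't modify a PHY context across a
+	 * band change, so a band change is applied as a remove + add.
+	 */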
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ ctxt->channel->band != chandef->chan->band) {
+ int ret;
+
+		/* ... remove it here ... */
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_REMOVE, 0);
+ if (ret)
+ return ret;
+
+ /* ... and proceed to add it again */
+ action = FW_CTXT_ACTION_ADD;
+ }
+
+ ctxt->channel = chandef->chan;
+ ctxt->width = chandef->width;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ action, 0);
+}
+
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!ctxt))
+ return;
+
+ ctxt->ref--;
+
+ /*
+	 * Move unused PHYs to a default channel. When the PHY is moved, the
+	 * fw will clean up the immediate quiet bit if it was previously set;
+	 * otherwise we might not be able to reuse this PHY.
+ */
+ if (ctxt->ref == 0) {
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
+ }
+}
+
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned long *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP)
+ __set_bit(mvmvif->phy_ctxt->id, data);
+}
+
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
+{
+ unsigned long phy_ctxt_counter = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_binding_iterator,
+ &phy_ctxt_counter);
+
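+	/* each bit set by the iterator marks a PHY context in use */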
+ return hweight8(phy_ctxt_counter);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
new file mode 100644
index 000000000..cd19831ac
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -0,0 +1,1066 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw/api/power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 flags)
+{
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
+}
+
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ bool d0i3)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
+ cmd->bf_energy_delta =
+ cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+		/* fw expects an absolute value, so negate the (negative) threshold */
+ cmd->bf_roaming_state =
+ cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+ }
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+ struct iwl_mac_power_cmd *cmd)
+{
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd->id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd->flags));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd->keep_alive_seconds));
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+ IWL_DEBUG_POWER(mvm, "Disable power management\n");
+ return;
+ }
+
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ cmd->skip_dtim_periods);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+ IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+ IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
+ }
+}
+
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+ if (mvmvif->dbgfs_pm.use_ps_poll) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ return;
+ }
+#endif
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ continue;
+
+ if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
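+	/* snooze can only be used when all four ACs are uAPSD-enabled */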
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window =
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
+
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
+static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *is_p2p_standalone = _data;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ *is_p2p_standalone = false;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->bss_conf.assoc)
+ *is_p2p_standalone = false;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ return false;
+
+ /*
+	 * Avoid using uAPSD if the P2P client is associated to a GO that
+	 * uses opportunistic power save; this is a current FW limitation.
+ */
+ if (vif->p2p &&
+ (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return false;
+
+ /*
+	 * Avoid using uAPSD if the client is in DCM (more than one
+	 * channel in use) - it causes latency issues in Miracast
+ */
+ if (iwl_mvm_phy_ctx_count(mvm) >= 2)
+ return false;
+
+ if (vif->p2p) {
+		/* Allow U-APSD only if p2p is standalone */
+ bool is_p2p_standalone = true;
+
+ if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_p2p_standalone_iterator,
+ &is_p2p_standalone);
+
+ if (!is_p2p_standalone)
+ return false;
+ }
+
+ return true;
+}
+
+static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ bool radar_detect = false;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ WARN_ON(!chanctx_conf);
+ if (chanctx_conf) {
+ chan = chanctx_conf->def.chan;
+ radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+ }
+ rcu_read_unlock();
+
+ return radar_detect;
+}
+
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd,
+ bool host_awake)
+{
+ int dtimper = vif->bss_conf.dtim_period ?: 1;
+ int skip;
+
+ /* disable, in case we're supposed to override */
+ cmd->skip_dtim_periods = 0;
+ cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+ if (iwl_mvm_power_is_radar(vif))
+ return;
+
+ if (dtimper >= 10)
+ return;
+
+ /* TODO: check that multicast wake lock is off */
+
+ if (host_awake) {
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ return;
+ skip = 2;
+ } else {
+ int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+
+ if (WARN_ON(!dtimper_tu))
+ return;
+		/* configure skip over dtim for up to 306 TU (~314 msec) */
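+		/* e.g. DTIM period 3 at beacon_int 100 TU gives dtimper_tu = 300
+		 * and skip = 1, i.e. look at every 2nd DTIM after the +1 below
+		 */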
+ skip = max_t(u8, 1, 306 / dtimper_tu);
+ }
+
+ /* the firmware really expects "look at every X DTIMs", so add 1 */
+ cmd->skip_dtim_periods = 1 + skip;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd,
+ bool host_awake)
+{
+ int dtimper, bi;
+ int keep_alive;
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ dtimper = vif->bss_conf.dtim_period;
+ bi = vif->bss_conf.beacon_int;
+
+ /*
+	 * Regardless of the power management state, the driver must set
+	 * the keep-alive period; the FW uses it to send keep-alive NDPs
+	 * immediately after association. Make sure the keep-alive period
+	 * is at least 3 * DTIM.
+ */
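+	/* e.g. DTIM period 1 at beacon_int 100 TU rounds up to 1 sec, so
+	 * the POWER_KEEP_ALIVE_PERIOD_SEC floor of 25 sec applies
+	 */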
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (mvm->ps_disabled)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+ return;
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+ (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+ !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
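+	/* bitrate is in 100 kbps units: enable LPRX for 1 or 6 Mbps beacons */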
+ if (vif->bss_conf.beacon_rate &&
+ (vif->bss_conf.beacon_rate->bitrate == 10 ||
+ vif->bss_conf.beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+ }
+
+ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+
+ if (!host_awake) {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
+ }
+
+ if (iwl_mvm_power_allow_uapsd(mvm, vif))
+ iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+ cmd->keep_alive_seconds =
+ cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+ if (mvmvif->dbgfs_pm.skip_over_dtim)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+ cmd->rx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+ cmd->tx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+ cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+ if (mvmvif->dbgfs_pm.lprx_ena)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ else
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+ cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+ if (mvmvif->dbgfs_pm.snooze_ena)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+ u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+ if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+ cmd->flags |= cpu_to_le16(flag);
+ else
+ cmd->flags &= cpu_to_le16(flag);
+ }
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mac_power_cmd cmd = {};
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd,
+ mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+ iwl_mvm_power_log(mvm, &cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
+#endif
+
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
+ sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ struct iwl_device_power_cmd cmd = {
+ .flags = 0,
+ };
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (!mvm->ps_disabled)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ mvm->disable_power_off_d3 : mvm->disable_power_off)
+ cmd.flags &=
+ cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ IWL_DEBUG_POWER(mvm,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
+ &cmd);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+ ETH_ALEN))
+ eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *ap_sta_id = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* The ap_sta_id is not expected to change during the current
+	 * association, so no explicit protection is needed
+ */
+ if (mvmvif->ap_sta_id == *ap_sta_id)
+ memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN);
+}
+
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+}
+
+struct iwl_power_vifs {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_vif *p2p_vif;
+ struct ieee80211_vif *ap_vif;
+ struct ieee80211_vif *monitor_vif;
+ bool p2p_active;
+ bool bss_active;
+ bool ap_active;
+ bool monitor_active;
+};
+
+static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->pm_enabled = false;
+}
+
+static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *disable_ps = _data;
+
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ *disable_ps |= mvmvif->ps_disabled;
+}
+
+static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_vifs *power_iterator = _data;
+ bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+
+ if (!mvmvif->uploaded)
+ return;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->ap_vif);
+ power_iterator->ap_vif = vif;
+ if (active)
+ power_iterator->ap_active = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->monitor_vif);
+ power_iterator->monitor_vif = vif;
+ if (active)
+ power_iterator->monitor_active = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->p2p_vif);
+ power_iterator->p2p_vif = vif;
+ if (active)
+ power_iterator->p2p_active = true;
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ power_iterator->bss_vif = vif;
+ if (active)
+ power_iterator->bss_active = true;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+ struct iwl_power_vifs *vifs)
+{
+ struct iwl_mvm_vif *bss_mvmvif = NULL;
+ struct iwl_mvm_vif *p2p_mvmvif = NULL;
+ struct iwl_mvm_vif *ap_mvmvif = NULL;
+ bool client_same_channel = false;
+ bool ap_same_channel = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* set pm_enabled to false on all vifs */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_disable_pm_iterator,
+ NULL);
+
+ if (vifs->bss_vif)
+ bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+
+ if (vifs->p2p_vif)
+ p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+
+ if (vifs->ap_vif)
+ ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+
+ /* don't allow PM if any TDLS stations exist */
+ if (iwl_mvm_tdls_sta_count(mvm, NULL))
+ return;
+
+	/* enable PM on the bss vif if it is standalone */
+ if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+ bss_mvmvif->pm_enabled = true;
+ return;
+ }
+
+	/* enable PM on the p2p vif if it is standalone */
+ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+ p2p_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ if (vifs->bss_active && vifs->p2p_active)
+ client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ p2p_mvmvif->phy_ctxt->id);
+ if (vifs->bss_active && vifs->ap_active)
+ ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ ap_mvmvif->phy_ctxt->id);
+
+	/* clients are not standalone: enable PM if they are in DCM */
+ if (!(client_same_channel || ap_same_channel)) {
+ if (vifs->bss_active)
+ bss_mvmvif->pm_enabled = true;
+ if (vifs->p2p_active)
+ p2p_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ /*
+ * There is only one channel in the system and there are only
+ * bss and p2p clients that share it
+ */
+ if (client_same_channel && !vifs->ap_active) {
+		/* both clients share the same channel */
+ bss_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+ 1 : 0);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+ cmd.snooze_window);
+
+ return pos;
+}
+
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
+ cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
+ cmd->bf_roaming_energy_delta =
+ cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
+ cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+ cmd->bf_temp_threshold =
+ cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+ cmd->bf_temp_fast_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+ cmd->bf_temp_slow_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
+ cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
+ cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
+ cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
+ cmd->ba_enable_beacon_abort =
+ cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
+}
+#endif
+
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 cmd_flags,
+ bool d0i3)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
+ vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
+ if (!d0i3)
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
+ mvmvif->bf_data.bf_enabled = true;
+
+ return ret;
+}
+
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags, bool d0i3)
+{
+ struct iwl_beacon_filter_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
+ mvmvif->bf_data.bf_enabled = false;
+
+ return ret;
+}
+
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+}
+
+static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
+{
+ bool disable_ps;
+ int ret;
+
+ /* disable PS if CAM */
+ disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+ /* ...or if any of the vifs require PS to be off */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_ps_disabled_iterator,
+ &disable_ps);
+
+ /* update device power state if it has changed */
+ if (mvm->ps_disabled != disable_ps) {
+ bool old_ps_disabled = mvm->ps_disabled;
+
+ mvm->ps_disabled = disable_ps;
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret) {
+ mvm->ps_disabled = old_ps_disabled;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ if (!mvmvif->bf_data.bf_enabled)
+ return 0;
+
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->bss_conf.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+}
+
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
+{
+ struct iwl_power_vifs vifs = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* get vifs info */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_get_vifs_iterator, &vifs);
+
+ ret = iwl_mvm_power_set_ps(mvm);
+ if (ret)
+ return ret;
+
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
+}
+
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+ struct iwl_power_vifs vifs = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* get vifs info */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_get_vifs_iterator, &vifs);
+
+ iwl_mvm_power_set_pm(mvm, &vifs);
+
+ ret = iwl_mvm_power_set_ps(mvm);
+ if (ret)
+ return ret;
+
+ if (vifs.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
+}
+
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags)
+{
+ int ret;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ if (!vif->bss_conf.assoc)
+ return 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
+
+ iwl_mvm_power_log(mvm, &cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
+#endif
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* configure beacon filtering */
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (enable) {
+ struct iwl_beacon_filter_cmd cmd_bf = {
+ IWL_BF_CMD_CONFIG_D0I3,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+ /*
+ * When beacon storing is supported - disable beacon filtering
+ * altogether - the latest beacon will be sent when exiting d0i3
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING))
+ ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags,
+ true);
+ else
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
+ } else {
+ if (mvmvif->bf_data.bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
new file mode 100644
index 000000000..690559bdf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -0,0 +1,359 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
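+/*
+ * For a concrete feel of the scale (values assumed from the driver's
+ * constants.h, e.g. IWL_MVM_MAX_QUOTA == 128 and
+ * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT == 64): QUOTA_100 is then 128
+ * scheduling fragments and QUOTA_LOWLAT_MIN works out to
+ * (128 * 64) / 100 == 81 fragments reserved for a low latency binding.
+ */
+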
+struct iwl_mvm_quota_iterator_data {
+ int n_interfaces[MAX_BINDINGS];
+ int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int dbgfs_min[MAX_BINDINGS];
+#endif
+ int n_low_latency_bindings;
+ struct ieee80211_vif *disabled_vif;
+};
+
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_quota_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 id;
+
+ /* skip disabled interfaces here immediately */
+ if (vif == data->disabled_vif)
+ return;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ /* currently, PHY ID == binding ID */
+ id = mvmvif->phy_ctxt->id;
+
+ /* need at least one binding per PHY */
+ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+ if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+ return;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->bss_conf.assoc)
+ break;
+ return;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ if (mvmvif->ap_ibss_active)
+ break;
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ if (mvmvif->monitor_active)
+ break;
+ return;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (data->colors[id] < 0)
+ data->colors[id] = mvmvif->phy_ctxt->color;
+ else
+ WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
+ data->n_interfaces[id]++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_quota_min)
+ data->dbgfs_min[id] = max(data->dbgfs_min[id],
+ mvmvif->dbgfs_quota_min);
+#endif
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
+ }
+}
+
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct iwl_mvm_vif *mvmvif;
+ int i, phy_id = -1, beacon_int = 0;
+
+ if (!mvm->noa_duration || !mvm->noa_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ phy_id = mvmvif->phy_ctxt->id;
+ beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ struct iwl_time_quota_data *data =
+ iwl_mvm_quota_cmd_get_quota(mvm, cmd,
+ i);
+ u32 id_n_c = le32_to_cpu(data->id_and_color);
+ u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ u32 quota = le32_to_cpu(data->quota);
+
+ if (id != phy_id)
+ continue;
+
+ quota *= (beacon_int - mvm->noa_duration);
+ quota /= beacon_int;
+
+ IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
+ le32_to_cpu(data->quota), quota);
+
+ data->quota = cpu_to_le32(quota);
+ }
+#endif
+}
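+
+/*
+ * The NoA scaling above is plain proportional arithmetic; e.g. with
+ * illustrative numbers beacon_int == 100 TU and mvm->noa_duration == 25 TU,
+ * a binding quota of 128 becomes 128 * (100 - 25) / 100 == 96.
+ */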
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+ bool force_update,
+ struct ieee80211_vif *disabled_vif)
+{
+ struct iwl_time_quota_cmd cmd = {};
+ int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
+ struct iwl_mvm_quota_iterator_data data = {
+ .n_interfaces = {},
+ .colors = { -1, -1, -1, -1 },
+ .disabled_vif = disabled_vif,
+ };
+ struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
+ struct iwl_time_quota_data *qdata, *last_data;
+ bool send = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return 0;
+
+ /* quotas for all bindings are updated again upon restart completion */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ /* iterator data above must match */
+ BUILD_BUG_ON(MAX_BINDINGS != 4);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_quota_iterator, &data);
+
+ /*
+ * The FW's scheduling session consists of
+ * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+ * equally between all the bindings that require quota
+ */
+ num_active_macs = 0;
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ num_active_macs += data.n_interfaces[i];
+ }
+
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding in case there
+ * are several data bindings but only a single low latency
+ * one. Split the rest of the quota equally between the
+ * other data interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: low-latency binding active, remaining quota per other binding: %d\n",
+ quota);
+ } else if (num_active_macs) {
+ /*
+ * There are either zero or more than one low latency
+ * bindings, or all the data interfaces belong to the single
+ * low latency binding. Split the quota equally among the
+ * data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: splitting evenly per binding: %d\n",
+ quota);
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
+ }
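+
+ /*
+ * Worked example with the assumed defaults above: QUOTA_100 == 128,
+ * one low latency binding and two non-lowlat interfaces give
+ * quota = (128 - 81) / 2 == 23 per interface and
+ * quota_rem = 128 - 2 * 23 - 81 == 1 left for redistribution below.
+ */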
+
+ for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+ if (data.colors[i] < 0)
+ continue;
+
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
+
+ qdata->id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+
+ if (data.n_interfaces[i] <= 0)
+ qdata->quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ else if (data.dbgfs_min[i])
+ qdata->quota =
+ cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of the
+ * bindings is in low latency. For this case, allocate
+ * the minimal required quota for the low latency
+ * binding.
+ */
+ qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+ else
+ qdata->quota =
+ cpu_to_le32(quota * data.n_interfaces[i]);
+
+ WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(qdata->quota), QUOTA_100);
+
+ qdata->max_duration = cpu_to_le32(0);
+
+ idx++;
+ }
+
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ if (le32_to_cpu(qdata->quota) != 0) {
+ le32_add_cpu(&qdata->quota, quota_rem);
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: giving remainder of %d to binding %d\n",
+ quota_rem, i);
+ break;
+ }
+ }
+
+ iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
+ /* compare against the last command sent and check that we
+ * have non-zero quota for all valid bindings */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
+ if (qdata->id_and_color != last_data->id_and_color)
+ send = true;
+ if (qdata->max_duration != last_data->max_duration)
+ send = true;
+ if (abs((int)le32_to_cpu(qdata->quota) -
+ (int)le32_to_cpu(last_data->quota))
+ > IWL_MVM_QUOTA_THRESHOLD)
+ send = true;
+ if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID))
+ continue;
+ WARN_ONCE(qdata->quota == 0,
+ "zero quota on binding %d\n", i);
+ }
+
+ if (!send && !force_update) {
+ /* don't send a practically unchanged command; the firmware has
+ * to re-initialize a lot of state, and that can have an adverse
+ * impact on it
+ */
+ return 0;
+ }
+
+ err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+ iwl_mvm_quota_cmd_size(mvm), &cmd);
+
+ if (err)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", err);
+ else
+ mvm->last_quota_cmd = cmd;
+ return err;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 000000000..5e1e671d2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,374 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+ if (chains & ANT_C)
+ WARN(false,
+ "tlc offload doesn't support antenna C. chains: 0x%x\n",
+ chains);
+
+ return fw_chains;
+}
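+
+/*
+ * e.g. chains == ANT_AB maps to
+ * IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK, while a set ANT_C
+ * bit only triggers the warning above - TLC offload has no chain C mask.
+ */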
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u8 supp = 0;
+
+ if (he_cap && he_cap->has_he)
+ return 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+ return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ bool vht_ena = vht_cap && vht_cap->vht_supported;
+ u16 flags = 0;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+ if (mvm->cfg->ht_params->ldpc &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
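+
+/*
+ * Illustration of the 2-bit-per-NSS extraction above: an rx_mcs_map of
+ * 0xfffa reads 0x2 (IEEE80211_VHT_MCS_SUPPORT_0_9) for nss == 1 and
+ * nss == 2, and 0x3 (not supported) for every higher stream.
+ */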
+
+static void
+rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+
+ for (i = 0; i < sta->rx_nss; i++) {
+ if (i == MAX_NSS)
+ break;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_rates[i][0] = cpu_to_le16(supp);
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
+ }
+}
+
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
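+
+/*
+ * The BIT(x + 1) - 1 pattern above builds a contiguous rate mask; e.g.
+ * assuming IWL_TLC_MNG_HT_RATE_MCS7 == 7, IEEE80211_HE_MCS_SUPPORT_0_7
+ * yields 0xff, i.e. MCS 0 through MCS 7 enabled.
+ */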
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_he_cap *he_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ int i;
+
+ for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+ cmd->ht_rates[i][0] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+ cmd->ht_rates[i][1] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ int i;
+ unsigned long tmp;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+
+ /* non HT rates */
+ supp = 0;
+ tmp = sta->supp_rates[sband->band];
+ for_each_set_bit(i, &tmp, BITS_PER_LONG)
+ supp |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_rates = cpu_to_le16(supp);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ /* HT/VHT rates */
+ if (he_cap && he_cap->has_he) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ } else if (vht_cap && vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+ } else if (ht_cap && ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tlc_update_notif *notif;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_lq_sta_rs_fw *lq_sta;
+ u32 flags;
+
+ rcu_read_lock();
+
+ notif = (void *)pkt->data;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ notif->sta_id);
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvmsta) {
+ IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ notif->sta_id);
+ goto out;
+ }
+
+ flags = le32_to_cpu(notif->flags);
+
+ lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+ IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+ lq_sta->last_rate_n_flags);
+ }
+
+ if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
+ u16 size = le32_to_cpu(notif->amsdu_size);
+
+ if (WARN_ON(sta->max_amsdu_len < size))
+ goto out;
+
+ mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->max_amsdu_len = size;
+
+ IWL_DEBUG_RATE(mvm,
+ "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+ le32_to_cpu(notif->amsdu_size), size,
+ mvmsta->amsdu_enabled);
+ }
+out:
+ rcu_read_unlock();
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool update)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ struct ieee80211_supported_band *sband;
+ struct iwl_tlc_config_cmd cfg_cmd = {
+ .sta_id = mvmsta->sta_id,
+ .max_ch_width = update ?
+ rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
+ .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+ .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+ .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
+ .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ .amsdu = iwl_mvm_is_csum_supported(mvm),
+ };
+ int ret;
+
+ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+#endif
+ sband = hw->wiphy->bands[band];
+ rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+ return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ lq_sta->pers.drv = mvm;
+ lq_sta->pers.sta_id = mvmsta->sta_id;
+ lq_sta->pers.chains = 0;
+ memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
+ lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
new file mode 100644
index 000000000..6f4508d62
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -0,0 +1,4163 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+#include "debugfs.h"
+
+#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
+
+/* Calculations of success ratio are done in fixed point where 12800 is 100%.
+ * Use this macro when dealing with threshold constants set as a percentage.
+ */
+#define RS_PERCENT(x) (128 * x)
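+/* e.g. RS_PERCENT(50) == 6400, i.e. 50% on the 12800 == 100% scale */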
+
+static u8 rs_ht_to_legacy[] = {
+ [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+ [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
+};
+
+static const u8 ant_toggle_lookup[] = {
+ [ANT_NONE] = ANT_NONE,
+ [ANT_A] = ANT_B,
+ [ANT_B] = ANT_A,
+ [ANT_AB] = ANT_AB,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+ IWL_RATE_##rp##M_INDEX, \
+ IWL_RATE_##rn##M_INDEX }
+
+#define IWL_DECLARE_MCS_RATE(s) \
+ [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP, \
+ IWL_RATE_HT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+ IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+ IWL_RATE_INVM_INDEX, \
+ IWL_RATE_INVM_INDEX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /* 5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 0, 5, 11), /* 6mbps ; MCS 0 */
+ IWL_DECLARE_RATE_INFO(9, INV, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 1, 11, 18), /* 12mbps ; MCS 1 */
+ IWL_DECLARE_RATE_INFO(18, 2, 12, 24), /* 18mbps ; MCS 2 */
+ IWL_DECLARE_RATE_INFO(24, 3, 18, 36), /* 24mbps ; MCS 3 */
+ IWL_DECLARE_RATE_INFO(36, 4, 24, 48), /* 36mbps ; MCS 4 */
+ IWL_DECLARE_RATE_INFO(48, 5, 36, 54), /* 48mbps ; MCS 5 */
+ IWL_DECLARE_RATE_INFO(54, 6, 48, INV), /* 54mbps ; MCS 6 */
+ IWL_DECLARE_MCS_RATE(7), /* MCS 7 */
+ IWL_DECLARE_MCS_RATE(8), /* MCS 8 */
+ IWL_DECLARE_MCS_RATE(9), /* MCS 9 */
+};
+
+enum rs_action {
+ RS_ACTION_STAY = 0,
+ RS_ACTION_DOWNSCALE = -1,
+ RS_ACTION_UPSCALE = 1,
+};
+
+enum rs_column_mode {
+ RS_INVALID = 0,
+ RS_LEGACY,
+ RS_SISO,
+ RS_MIMO2,
+};
+
+#define MAX_NEXT_COLUMNS 7
+#define MAX_COLUMN_CHECKS 3
+
+struct rs_tx_column;
+
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct rs_rate *rate,
+ const struct rs_tx_column *next_col);
+
+struct rs_tx_column {
+ enum rs_column_mode mode;
+ u8 ant;
+ bool sgi;
+ enum rs_column next_columns[MAX_NEXT_COLUMNS];
+ allow_column_func_t checks[MAX_COLUMN_CHECKS];
+};
+
+static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct rs_rate *rate,
+ const struct rs_tx_column *next_col)
+{
+ return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
+}
+
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct rs_rate *rate,
+ const struct rs_tx_column *next_col)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ return false;
+
+ if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ if (mvm->nvm_data->sku_cap_mimo_disabled)
+ return false;
+
+ return true;
+}
+
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct rs_rate *rate,
+ const struct rs_tx_column *next_col)
+{
+ if (!sta->ht_cap.ht_supported)
+ return false;
+
+ return true;
+}
+
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct rs_rate *rate,
+ const struct rs_tx_column *next_col)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ if (is_ht20(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ return true;
+ if (is_ht40(rate) && (ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ return true;
+ if (is_ht80(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_80))
+ return true;
+ if (is_ht160(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_160))
+ return true;
+
+ return false;
+}
+
+static const struct rs_tx_column rs_tx_columns[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_ant_allow,
+ },
+ },
+ [RS_COLUMN_LEGACY_ANT_B] = {
+ .mode = RS_LEGACY,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_ant_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_ant_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_ant_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_A_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_A,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_ant_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_SISO_ANT_B_SGI] = {
+ .mode = RS_SISO,
+ .ant = ANT_B,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_siso_allow,
+ rs_ant_allow,
+ rs_sgi_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ },
+ },
+ [RS_COLUMN_MIMO2_SGI] = {
+ .mode = RS_MIMO2,
+ .ant = ANT_AB,
+ .sgi = true,
+ .next_columns = {
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ },
+ .checks = {
+ rs_mimo_allow,
+ rs_sgi_allow,
+ },
+ },
+};
+
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+ /* also works for HT because bits 7:6 are zero there */
+ return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+}
+
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
+
+ /* skip 9M, not supported in HT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx += 1;
+ if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
+ return idx;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+ rate_n_flags & RATE_MCS_HE_MSK) {
+ idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ idx += IWL_RATE_MCS_0_INDEX;
+
+ /* skip 9M, not supported in VHT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx++;
+ if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+ return idx;
+ if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+ (idx <= IWL_LAST_HE_RATE))
+ return idx;
+ } else {
+ /* legacy rate format, search for match in table */
+
+ u8 legacy_rate = rs_extract_rate(rate_n_flags);
+
+ for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+ if (iwl_rates[idx].plcp == legacy_rate)
+ return idx;
+ }
+
+ return IWL_RATE_INVALID;
+}
+
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ int tid, bool ndp);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+/*
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 Mbps
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in the
+ * G (2.4 GHz) band.
+ */
+
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
+};
+
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
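+/*
+ * A sketch of how a row is picked (assumed from the 4 variants listed
+ * above): idx = (aggregated ? 2 : 0) + (short_gi ? 1 : 0), so AGG+SGI
+ * traffic reads row 3.
+ */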
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
+ {0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
+ {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
+};
+
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
+ {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
+ {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
+ {0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
+ {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+ {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
+};
+
+static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 191, 0, 244, 288, 298, 308, 313, 318, 323, 328, 330},
+ {0, 0, 0, 0, 200, 0, 251, 293, 302, 312, 317, 322, 327, 332, 334},
+ {0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581},
+ {0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165},
+};
+
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
+ {0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+ {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
+};
+
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
+ {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+ {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
+ {0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
+ {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+ {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
+};
+
+static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 240, 0, 278, 308, 313, 319, 322, 324, 328, 330, 334},
+ {0, 0, 0, 0, 247, 0, 282, 310, 315, 320, 323, 325, 329, 332, 338},
+ {0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629, 10592},
+ {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
+static const char *rs_pretty_ant(u8 ant)
+{
+ static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+ [ANT_C] = "C",
+ [ANT_AC] = "AC",
+ [ANT_BC] = "BC",
+ [ANT_ABC] = "ABC",
+ };
+
+ if (ant > ANT_ABC)
+ return "UNKNOWN";
+
+ return ant_name[ant];
+}
+
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+{
+ static const char * const lq_types[] = {
+ [LQ_NONE] = "NONE",
+ [LQ_LEGACY_A] = "LEGACY_A",
+ [LQ_LEGACY_G] = "LEGACY_G",
+ [LQ_HT_SISO] = "HT SISO",
+ [LQ_HT_MIMO2] = "HT MIMO",
+ [LQ_VHT_SISO] = "VHT SISO",
+ [LQ_VHT_MIMO2] = "VHT MIMO",
+ [LQ_HE_SISO] = "HE SISO",
+ [LQ_HE_MIMO2] = "HE MIMO",
+ };
+
+ if (type < LQ_NONE || type >= LQ_MAX)
+ return "UNKNOWN";
+
+ return lq_types[type];
+}
+
+static char *rs_pretty_rate(const struct rs_rate *rate)
+{
+ static char buf[40];
+ static const char * const legacy_rates[] = {
+ [IWL_RATE_1M_INDEX] = "1M",
+ [IWL_RATE_2M_INDEX] = "2M",
+ [IWL_RATE_5M_INDEX] = "5.5M",
+ [IWL_RATE_11M_INDEX] = "11M",
+ [IWL_RATE_6M_INDEX] = "6M",
+ [IWL_RATE_9M_INDEX] = "9M",
+ [IWL_RATE_12M_INDEX] = "12M",
+ [IWL_RATE_18M_INDEX] = "18M",
+ [IWL_RATE_24M_INDEX] = "24M",
+ [IWL_RATE_36M_INDEX] = "36M",
+ [IWL_RATE_48M_INDEX] = "48M",
+ [IWL_RATE_54M_INDEX] = "54M",
+ };
+ static const char *const ht_vht_rates[] = {
+ [IWL_RATE_MCS_0_INDEX] = "MCS0",
+ [IWL_RATE_MCS_1_INDEX] = "MCS1",
+ [IWL_RATE_MCS_2_INDEX] = "MCS2",
+ [IWL_RATE_MCS_3_INDEX] = "MCS3",
+ [IWL_RATE_MCS_4_INDEX] = "MCS4",
+ [IWL_RATE_MCS_5_INDEX] = "MCS5",
+ [IWL_RATE_MCS_6_INDEX] = "MCS6",
+ [IWL_RATE_MCS_7_INDEX] = "MCS7",
+ [IWL_RATE_MCS_8_INDEX] = "MCS8",
+ [IWL_RATE_MCS_9_INDEX] = "MCS9",
+ };
+ const char *rate_str;
+
+ if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
+ rate_str = legacy_rates[rate->index];
+ else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
+ (rate->index >= IWL_RATE_MCS_0_INDEX) &&
+ (rate->index <= IWL_RATE_MCS_9_INDEX))
+ rate_str = ht_vht_rates[rate->index];
+ else
+ rate_str = "BAD_RATE";
+
+ sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
+ rs_pretty_ant(rate->ant), rate_str);
+ return buf;
+}
+
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+ const char *prefix)
+{
+ IWL_DEBUG_RATE(mvm,
+ "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
+ prefix, rs_pretty_rate(rate), rate->bw,
+ rate->sgi, rate->ldpc, rate->stbc);
+}
+
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+ window->data = 0;
+ window->success_counter = 0;
+ window->success_ratio = IWL_INVALID_VALUE;
+ window->counter = 0;
+ window->average_tpt = IWL_INVALID_VALUE;
+}
+
+static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl)
+{
+ int i;
+
+ IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&tbl->win[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ rs_rate_scale_clear_window(&tbl->tpc_win[i]);
+}
+
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+
+ IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+
+ /* start BA session until the peer sends del BA */
+ ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by reloading the firmware. Stop the TX BA session
+ * here.
+ */
+ IWL_ERR(mvm, "Failed to start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ return ret;
+}
+
+static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ u8 tid, struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_tid_data *tid_data;
+
+ /*
+ * In AP mode, tid can be equal to IWL_MAX_TID_COUNT
+ * when the frame is not QoS
+ */
+ if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) {
+ IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
+ return;
+ } else if (tid == IWL_MAX_TID_COUNT) {
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED &&
+ tid_data->state == IWL_AGG_OFF &&
+ (lq_sta->tx_agg_tid_en & BIT(tid)) &&
+ tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) {
+ IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
+ if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+ tid_data->state = IWL_AGG_QUEUED;
+ }
+}
+
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Static helper to get the expected throughput from an iwl_scale_tbl_info;
+ * wraps a NULL pointer check on the expected_tpt table.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
+/**
+ * _rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate. window->data contains the bitmask of successful
+ * packets.
+ */
+static int _rs_collect_tx_data(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ struct iwl_rate_scale_data *window)
+{
+ static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ /* Get expected throughput */
+ tpt = get_expected_tpt(tbl, scale_index);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history window; anything older isn't really relevant any more.
+ * If we have filled up the sliding window, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (window->counter >= IWL_RATE_MAX_WINDOW) {
+ /* remove earliest */
+ window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+ if (window->data & mask) {
+ window->data &= ~mask;
+ window->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ window->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ window->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ window->success_counter++;
+ window->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (window->counter > 0)
+ window->success_ratio = 128 * (100 * window->success_counter)
+ / window->counter;
+ else
+ window->success_ratio = IWL_INVALID_VALUE;
+
+ fail_count = window->counter - window->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
+ window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+ else
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ return 0;
+}
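+
+/*
+ * Worked example of the fixed-point math above: 31 successes out of 62
+ * attempts give success_ratio = 128 * (100 * 31) / 62 == 6400
+ * (RS_PERCENT(50)), and average_tpt = (6400 * tpt + 64) / 128, i.e. half
+ * of the expected throughput, rounded.
+ */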
+
+static int rs_collect_tpc_data(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ u8 reduced_txp)
+{
+ struct iwl_rate_scale_data *window = NULL;
+
+ if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ return -EINVAL;
+
+ window = &tbl->tpc_win[reduced_txp];
+ return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+ window);
+}
+
+static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ u8 tid, int successes)
+{
+ struct iwl_mvm_tid_data *tid_data;
+
+ if (tid >= IWL_MAX_TID_COUNT)
+ return;
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ /*
+ * Measure whether there are enough successful transmits per
+ * second. These statistics are used only to decide if we can
+ * start a BA session, so they should be updated only when
+ * A-MPDU is off.
+ */
+ if (tid_data->state != IWL_AGG_OFF)
+ return;
+
+ if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) ||
+ (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
+ tid_data->tx_count_last = tid_data->tx_count;
+ tid_data->tx_count = 0;
+ tid_data->tpt_meas_start = jiffies;
+ } else {
+ tid_data->tx_count += successes;
+ }
+}
+
+static int rs_collect_tlc_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, u8 tid,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ if (tbl->column != RS_COLUMN_INVALID) {
+ struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
+
+ pers->tx_stats[tbl->column][scale_index].total += attempts;
+ pers->tx_stats[tbl->column][scale_index].success += successes;
+ }
+
+ rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes);
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+ return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+ window);
+}
+
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+ struct rs_rate *rate)
+{
+ u32 ucode_rate = 0;
+ int index = rate->index;
+
+ ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
+ if (is_legacy(rate)) {
+ ucode_rate |= iwl_rates[index].plcp;
+ if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+ ucode_rate |= RATE_MCS_CCK_MSK;
+ return ucode_rate;
+ }
+
+ if (is_ht(rate)) {
+ if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
+ IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+ index = IWL_LAST_HT_RATE;
+ }
+ ucode_rate |= RATE_MCS_HT_MSK;
+
+ if (is_ht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_siso;
+ else if (is_ht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
+ else
+ WARN_ON_ONCE(1);
+ } else if (is_vht(rate)) {
+ if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+ IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+ index = IWL_LAST_VHT_RATE;
+ }
+ ucode_rate |= RATE_MCS_VHT_MSK;
+ if (is_vht_siso(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_siso;
+ else if (is_vht_mimo2(rate))
+ ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
+ else
+ WARN_ON_ONCE(1);
+
+ } else {
+ IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
+ }
+
+ if (is_siso(rate) && rate->stbc) {
+ /* To enable STBC we need to set both a flag and ANT_AB */
+ ucode_rate |= RATE_MCS_ANT_AB_MSK;
+ ucode_rate |= RATE_MCS_STBC_MSK;
+ }
+
+ ucode_rate |= rate->bw;
+ if (rate->sgi)
+ ucode_rate |= RATE_MCS_SGI_MSK;
+ if (rate->ldpc)
+ ucode_rate |= RATE_MCS_LDPC_MSK;
+
+ return ucode_rate;
+}
+
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+ enum nl80211_band band,
+ struct rs_rate *rate)
+{
+ u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+ u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
+ u8 nss;
+
+ memset(rate, 0, sizeof(*rate));
+ rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
+
+ if (rate->index == IWL_RATE_INVALID)
+ return -EINVAL;
+
+ rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
+
+ /* Legacy */
+ if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+ !(ucode_rate & RATE_MCS_VHT_MSK) &&
+ !(ucode_rate & RATE_MCS_HE_MSK)) {
+ if (num_of_ant == 1) {
+ if (band == NL80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+ }
+
+ return 0;
+ }
+
+ /* HT, VHT or HE */
+ if (ucode_rate & RATE_MCS_SGI_MSK)
+ rate->sgi = true;
+ if (ucode_rate & RATE_MCS_LDPC_MSK)
+ rate->ldpc = true;
+ if (ucode_rate & RATE_MCS_STBC_MSK)
+ rate->stbc = true;
+ if (ucode_rate & RATE_MCS_BF_MSK)
+ rate->bfer = true;
+
+ rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
+
+ if (ucode_rate & RATE_MCS_HT_MSK) {
+ nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HT_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d",
+ rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ } else if (ucode_rate & RATE_MCS_VHT_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_VHT_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d",
+ rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_VHT_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HE_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d", rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HE_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+ !is_he(rate) && !is_vht(rate));
+
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
+{
+ u8 new_ant_type;
+
+ if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C))
+ return 0;
+
+ if (!rs_is_valid_ant(valid_ant, rate->ant))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[rate->ant];
+
+ while ((new_ant_type != rate->ant) &&
+ !rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == rate->ant)
+ return 0;
+
+ rate->ant = new_ant_type;
+
+ return 1;
+}
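+
+/*
+ * e.g. a rate on ANT_A with valid_ant == ANT_AB toggles to ANT_B via
+ * ant_toggle_lookup[]; if only ANT_A were valid, the walk would return
+ * to ANT_A and the function would report no change (0).
+ */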
+
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ if (is_legacy(rate))
+ return lq_sta->active_legacy_rate;
+ else if (is_siso(rate))
+ return lq_sta->active_siso_rate;
+ else if (is_mimo2(rate))
+ return lq_sta->active_mimo2_rate;
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = IWL_RATE_INVALID;
+ u8 low = IWL_RATE_INVALID;
+
+ /* 802.11a and non-legacy (HT/VHT) rates walk to the literally
+ * adjacent rate in the rate table */
+ if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = index - 1;
+ if (i >= 0)
+ mask = BIT(i);
+ for (; i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = index + 1;
+ for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = index;
+ while (low != IWL_RATE_INVALID) {
+ low = iwl_rates[low].prev_rs;
+ if (low == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ }
+
+ high = index;
+ while (high != IWL_RATE_INVALID) {
+ high = iwl_rates[high].next_rs;
+ if (high == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ }
+
+ return (high << 8) | low;
+}
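+
+/*
+ * The u16 return value packs both neighbours: e.g. low == 4 and
+ * high == 6 yield (6 << 8) | 4 == 0x0604; callers unpack with
+ * low = high_low & 0xff and high = high_low >> 8.
+ */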
+
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+}
+
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+ */
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ u8 low;
+ u16 high_low;
+ u16 rate_mask;
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
+ high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+ rate->type);
+ low = high_low & 0xff;
+
+ /* Bottom rate of column reached */
+ if (low == IWL_RATE_INVALID)
+ return true;
+
+ rate->index = low;
+ return false;
+}
+
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate)
+{
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+
+ if (is_legacy(rate)) {
+ /* No column to downgrade from Legacy */
+ return;
+ } else if (is_siso(rate)) {
+ /* Downgrade to Legacy if we were in SISO */
+ if (lq_sta->band == NL80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
+ rate->index > IWL_RATE_MCS_9_INDEX);
+
+ rate->index = rs_ht_to_legacy[rate->index];
+ rate->ldpc = false;
+ } else {
+ /* Downgrade to SISO with same MCS if in MIMO */
+ rate->type = is_vht_mimo2(rate) ?
+ LQ_VHT_SISO : LQ_HT_SISO;
+ }
+
+ if (num_of_ant(rate->ant) > 1)
+ rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+
+ /* Relevant in both switching to SISO or Legacy */
+ rate->sgi = false;
+
+ if (!rs_rate_supported(lq_sta, rate))
+ rs_get_lower_rate_in_column(lq_sta, rate);
+}
+
+/* Check if both rates share the same column */
+static inline bool rs_rate_column_match(struct rs_rate *a,
+ struct rs_rate *b)
+{
+ bool ant_match;
+
+ if (a->stbc || a->bfer)
+ ant_match = (b->ant == ANT_A || b->ant == ANT_B);
+ else
+ ant_match = (a->ant == b->ant);
+
+ return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
+ && ant_match;
+}
+
+static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
+{
+ if (is_legacy(rate)) {
+ if (rate->ant == ANT_A)
+ return RS_COLUMN_LEGACY_ANT_A;
+
+ if (rate->ant == ANT_B)
+ return RS_COLUMN_LEGACY_ANT_B;
+
+ goto err;
+ }
+
+ if (is_siso(rate)) {
+ if (rate->ant == ANT_A || rate->stbc || rate->bfer)
+ return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
+ RS_COLUMN_SISO_ANT_A;
+
+ if (rate->ant == ANT_B)
+ return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
+ RS_COLUMN_SISO_ANT_B;
+
+ goto err;
+ }
+
+ if (is_mimo(rate))
+ return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
+
+err:
+ return RS_COLUMN_INVALID;
+}
+
+static u8 rs_get_tid(struct ieee80211_hdr *hdr)
+{
+ u8 tid = IWL_MAX_TID_COUNT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ if (unlikely(tid > IWL_MAX_TID_COUNT))
+ tid = IWL_MAX_TID_COUNT;
+
+ return tid;
+}
+
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info, bool ndp)
+{
+ int legacy_success;
+ int retries;
+ int i;
+ struct iwl_lq_cmd *table;
+ u32 lq_hwrate;
+ struct rs_rate lq_rate, tx_resp_rate;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+ u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+ u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
+ u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+ /* Treat uninitialized rate scaling data the same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->pers.drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* Disable the last-tx check if we are debugging with a fixed
+ * rate, but still update the tx stats */
+ if (lq_sta->pers.dbg_fixed_rate) {
+ int index = tx_resp_rate.index;
+ enum rs_column column;
+ int attempts, success;
+
+ column = rs_get_column_from_rate(&tx_resp_rate);
+ if (WARN_ONCE(column == RS_COLUMN_INVALID,
+ "Can't map rate 0x%x to column",
+ tx_resp_hwrate))
+ return;
+
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ attempts = info->status.ampdu_len;
+ success = info->status.ampdu_ack_len;
+ } else {
+ attempts = info->status.rates[0].count;
+ success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ }
+
+ lq_sta->pers.tx_stats[column][index].total += attempts;
+ lq_sta->pers.tx_stats[column][index].success += success;
+
+ IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+ tx_resp_hwrate, success, attempts);
+ return;
+ }
+#endif
+
+ if (time_after(jiffies,
+ (unsigned long)(lq_sta->last_tx +
+ (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+ IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+ iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
+ return;
+ }
+ lq_sta->last_tx = jiffies;
+
+ /* Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ lq_hwrate = le32_to_cpu(table->rs_table[0]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Here we actually compare this rate to the latest LQ command */
+ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
+ IWL_DEBUG_RATE(mvm,
+ "tx resp color 0x%x does not match 0x%x\n",
+ lq_color, LQ_FLAG_COLOR_GET(table->flags));
+
+ /*
+ * Since rates mismatch, the last LQ command may have failed.
+ * After IWL_MVM_RS_MISSED_RATE_MAX mismatches, resync the uCode
+ * with the driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ if (!lq_sta->search_better_tbl) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ }
+
+ if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
+ IWL_DEBUG_RATE(mvm,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &lq_rate, "ACTUAL");
+
+ /*
+ * no matching table found, let's by-pass the data collection
+ * and continue to perform rate scale to find the rate table
+ */
+ rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len,
+ reduced_txp);
+
+ /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+ * it as a single frame loss as we don't want the success ratio
+ * to dip too quickly because a BA wasn't received.
+ * For TPC, there's no need for this optimisation since we want
+ * to recover very quickly from a bad power reduction and,
+ * therefore we'd like the success ratio to get an immediate hit
+ * when failing to get a BA, so we'd switch back to a lower or
+ * zero power reduction. When FW transmits agg with a rate
+ * different from the initial rate, it will not use reduced txp
+ * and will send BA notification twice (one empty with reduced
+ * txp equal to the value from LQ and one with reduced txp 0).
+ * We need to update counters for each txp level accordingly.
+ */
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
+ rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /* For legacy, update frame history for each Tx retry. */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ lq_hwrate = le32_to_cpu(table->rs_table[i]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
+ tmp_tbl = curr_tbl;
+ else if (rs_rate_column_match(&lq_rate,
+ &other_tbl->rate))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+
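+ /* Entries 0..retries-1 in the rate table are failed attempts;
+ * only the final attempt may have been ACKed, hence the
+ * 0 vs legacy_success split below.
+ */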
+ rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
+ rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = lq_hwrate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[info->band])
+ rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
+ ieee80211_is_qos_nullfunc(hdr->frame_control));
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
+ struct iwl_lq_sta *lq_sta)
+{
+ IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+ lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
+ if (is_legacy) {
+ lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->visited_columns = 0;
+}
+
+static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
+{
+ if (rate_mask)
+ return find_last_bit(&rate_mask, BITS_PER_LONG);
+ return IWL_RATE_INVALID;
+}
+
+static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column)
+{
+ switch (column->mode) {
+ case RS_LEGACY:
+ return lq_sta->max_legacy_rate_idx;
+ case RS_SISO:
+ return lq_sta->max_siso_rate_idx;
+ case RS_MIMO2:
+ return lq_sta->max_mimo2_rate_idx;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return lq_sta->max_legacy_rate_idx;
+}
+
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ const struct rs_tx_column *column,
+ u32 bw)
+{
+ /* Used to choose among HT tables */
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+ if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+ column->mode != RS_SISO &&
+ column->mode != RS_MIMO2))
+ return expected_tpt_legacy;
+
+ /* Legacy rates have only one table */
+ if (column->mode == RS_LEGACY)
+ return expected_tpt_legacy;
+
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40/80/160), SGI, and
+ * aggregation status */
+ if (column->mode == RS_SISO) {
+ switch (bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_siso_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_siso_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_siso_80MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ ht_tbl_pointer = expected_tpt_siso_160MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else if (column->mode == RS_MIMO2) {
+ switch (bw) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ ht_tbl_pointer = expected_tpt_mimo2_160MHz;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ if (!column->sgi && !lq_sta->is_agg) /* Normal */
+ return ht_tbl_pointer[0];
+ else if (column->sgi && !lq_sta->is_agg) /* SGI */
+ return ht_tbl_pointer[1];
+ else if (!column->sgi && lq_sta->is_agg) /* AGG */
+ return ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ return ht_tbl_pointer[3];
+}
+
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct rs_rate *rate = &tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+
+ tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
+}
+
+static s32 rs_get_best_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
+ unsigned long rate_mask, s8 index)
+{
+ struct iwl_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 success_ratio = active_tbl->win[index].success_ratio;
+ u16 expected_current_tpt = active_tbl->expected_tpt[index];
+ const u16 *tpt_tbl = tbl->expected_tpt;
+ u16 high_low;
+ u32 target_tpt;
+ int rate_idx;
+
+ if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
+ target_tpt = 100 * expected_current_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
+ success_ratio, target_tpt);
+ } else {
+ target_tpt = lq_sta->last_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
+ success_ratio, target_tpt);
+ }
+
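+ /* Walk up from the lowest supported rate until the search
+ * column's expected throughput exceeds the target.
+ */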
+ rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
+
+ while (rate_idx != IWL_RATE_INVALID) {
+ if (target_tpt < (100 * tpt_tbl[rate_idx]))
+ break;
+
+ high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
+ tbl->rate.type);
+
+ rate_idx = (high_low >> 8) & 0xff;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
+ rate_idx, target_tpt,
+ rate_idx != IWL_RATE_INVALID ?
+ 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
+
+ return rate_idx;
+}
+
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ return RATE_MCS_CHAN_WIDTH_160;
+ case IEEE80211_STA_RX_BW_80:
+ return RATE_MCS_CHAN_WIDTH_80;
+ case IEEE80211_STA_RX_BW_40:
+ return RATE_MCS_CHAN_WIDTH_40;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return RATE_MCS_CHAN_WIDTH_20;
+ }
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct iwl_mvm *mvm;
+
+ mvm = lq_sta->pers.drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) &&
+ (lq_sta->flush_timer) && (flush_interval_passed))) {
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay is expired %d %d %d\n",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+ IWL_DEBUG_RATE(mvm,
+ "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+ /* mark the current column as visited */
+ lq_sta->visited_columns = BIT(tbl->column);
+ /*
+ * Else if we've used this modulation mode enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: stay in table clear win\n");
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED)
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
+ }
+}
+
+static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl,
+ enum rs_action scale_action)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /*
+ * In case TLC offload is not active amsdu_enabled is either 0xFFFF
+ * or 0, since there is no per-TID alg.
+ */
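+ /* A-MSDU only pays off at higher HT/VHT rates: enable it for all
+ * TIDs (0xFFFF) from MCS5 up, and drop it when scaling down.
+ */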
+ if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
+ tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
+ scale_action == RS_ACTION_DOWNSCALE)
+ mvmsta->amsdu_enabled = 0;
+ else
+ mvmsta->amsdu_enabled = 0xFFFF;
+
+ mvmsta->max_amsdu_len = sta->max_amsdu_len;
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+}
+
+static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ enum rs_action scale_action)
+{
+ if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+ return false;
+
+ if (!is_vht_siso(&tbl->rate))
+ return false;
+
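+ /* Far-range tweak: 20Mhz MCS4 yields throughput comparable to
+ * 80Mhz MCS0 with better sensitivity, so prefer it at the bottom
+ * of the 80Mhz SISO column.
+ */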
+ if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
+ (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
+ (scale_action == RS_ACTION_DOWNSCALE)) {
+ tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
+ tbl->rate.index = IWL_RATE_MCS_4_INDEX;
+ IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
+ goto tweaked;
+ }
+
+ /* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
+ * sustainable, i.e. we're past the test window. We can't go back
+ * if MCS5 is just tested as this will happen always after switching
+ * to 20Mhz MCS4 because the rate stats are cleared.
+ */
+ if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
+ (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
+ (scale_action == RS_ACTION_STAY)) ||
+ ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
+ (scale_action == RS_ACTION_UPSCALE)))) {
+ tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
+ tbl->rate.index = IWL_RATE_MCS_1_INDEX;
+ IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
+ goto tweaked;
+ }
+
+ return false;
+
+tweaked:
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
+ return true;
+}
+
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ int i, j, max_rate;
+ enum rs_column next_col_id;
+ const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+ const struct rs_tx_column *next_col;
+ allow_column_func_t allow_func;
+ u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
+ const u16 *expected_tpt_tbl;
+ u16 tpt, max_expected_tpt;
+
+ for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+ next_col_id = curr_col->next_columns[i];
+
+ if (next_col_id == RS_COLUMN_INVALID)
+ continue;
+
+ if (lq_sta->visited_columns & BIT(next_col_id)) {
+ IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+ next_col_id);
+ continue;
+ }
+
+ next_col = &rs_tx_columns[next_col_id];
+
+ if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+ next_col_id, valid_ants, next_col->ant);
+ continue;
+ }
+
+ for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+ allow_func = next_col->checks[j];
+ if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+ next_col))
+ break;
+ }
+
+ if (j != MAX_COLUMN_CHECKS) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: not allowed (check %d failed)\n",
+ next_col_id, j);
+
+ continue;
+ }
+
+ tpt = lq_sta->last_tpt / 100;
+ expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+ rs_bw_from_sta_bw(sta));
+ if (WARN_ON_ONCE(!expected_tpt_tbl))
+ continue;
+
+ max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
+ if (max_rate == IWL_RATE_INVALID) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: no rate is allowed in this column\n",
+ next_col_id);
+ continue;
+ }
+
+ max_expected_tpt = expected_tpt_tbl[max_rate];
+ if (tpt >= max_expected_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
+ continue;
+ }
+
+ IWL_DEBUG_RATE(mvm,
+ "Found potential column %d. Max expected %d current %d\n",
+ next_col_id, max_expected_tpt, tpt);
+ break;
+ }
+
+ if (i == MAX_NEXT_COLUMNS)
+ return RS_COLUMN_INVALID;
+
+ return next_col_id;
+}
+
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta *sta,
+ enum rs_column col_id)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct rs_rate *rate = &search_tbl->rate;
+ const struct rs_tx_column *column = &rs_tx_columns[col_id];
+ const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
+ unsigned long rate_mask = 0;
+ u32 rate_idx = 0;
+
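+ /* Clone the active table up to (but not including) the per-rate
+ * history windows; the search table starts collecting fresh
+ * statistics.
+ */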
+ memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
+
+ rate->sgi = column->sgi;
+ rate->ant = column->ant;
+
+ if (column->mode == RS_LEGACY) {
+ if (lq_sta->band == NL80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ rate->ldpc = false;
+ rate_mask = lq_sta->active_legacy_rate;
+ } else if (column->mode == RS_SISO) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ rate_mask = lq_sta->active_siso_rate;
+ } else if (column->mode == RS_MIMO2) {
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ rate_mask = lq_sta->active_mimo2_rate;
+ } else {
+ WARN_ONCE(1, "Bad column mode");
+ }
+
+ if (column->mode != RS_LEGACY) {
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->ldpc = lq_sta->ldpc;
+ }
+
+ search_tbl->column = col_id;
+ rs_set_expected_tpt_table(lq_sta, search_tbl);
+
+ lq_sta->visited_columns |= BIT(col_id);
+
+ /* Get the best matching rate if we're changing modes. e.g.
+ * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+ */
+ if (curr_column->mode != column->mode) {
+ rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+ rate_mask, rate->index);
+
+ if ((rate_idx == IWL_RATE_INVALID) ||
+ !(BIT(rate_idx) & rate_mask)) {
+ IWL_DEBUG_RATE(mvm,
+ "can not switch with index %d"
+ " rate mask %lx\n",
+ rate_idx, rate_mask);
+
+ goto err;
+ }
+
+ rate->index = rate_idx;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+ col_id, rate->index);
+
+ return 0;
+
+err:
+ rate->type = LQ_NONE;
+ return -1;
+}
+
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl,
+ s32 sr, int low, int high,
+ int current_tpt,
+ int low_tpt, int high_tpt)
+{
+ enum rs_action action = RS_ACTION_STAY;
+
+ if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
+ (current_tpt == 0)) {
+ IWL_DEBUG_RATE(mvm,
+ "Decrease rate because of low SR\n");
+ return RS_ACTION_DOWNSCALE;
+ }
+
+ if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE) &&
+ (high != IWL_RATE_INVALID)) {
+ IWL_DEBUG_RATE(mvm,
+ "No data about high/low rates. Increase rate\n");
+ return RS_ACTION_UPSCALE;
+ }
+
+ if ((high_tpt == IWL_INVALID_VALUE) &&
+ (high != IWL_RATE_INVALID) &&
+ (low_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "No data about high rate and low rate is worse. Increase rate\n");
+ return RS_ACTION_UPSCALE;
+ }
+
+ if ((high_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Higher rate is better. Increate rate\n");
+ return RS_ACTION_UPSCALE;
+ }
+
+ if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Both high and low are worse. Maintain rate\n");
+ return RS_ACTION_STAY;
+ }
+
+ if ((low_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "Lower rate is better\n");
+ action = RS_ACTION_DOWNSCALE;
+ goto out;
+ }
+
+ if ((low_tpt == IWL_INVALID_VALUE) &&
+ (low != IWL_RATE_INVALID)) {
+ IWL_DEBUG_RATE(mvm,
+ "No data about lower rate\n");
+ action = RS_ACTION_DOWNSCALE;
+ goto out;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Maintain rate\n");
+
+out:
+ if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
+ if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
+ IWL_DEBUG_RATE(mvm,
+ "SR is above NO DECREASE. Avoid downscale\n");
+ action = RS_ACTION_STAY;
+ } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
+ IWL_DEBUG_RATE(mvm,
+ "Current TPT is higher than max expected in low rate. Avoid downscale\n");
+ action = RS_ACTION_STAY;
+ } else {
+ IWL_DEBUG_RATE(mvm, "Decrease rate\n");
+ }
+ }
+
+ return action;
+}
+
+static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
+ * supports STBC of at least 1*SS
+ */
+ if (!lq_sta->stbc_capable)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ return true;
+}
+
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ int *weaker, int *stronger)
+{
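+ /* reduced_tpc indices grow with attenuation: a higher index means
+ * weaker Tx power, so "weaker" is index + step.
+ */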
+ *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
+ if (*weaker > TPC_MAX_REDUCTION)
+ *weaker = TPC_INVALID;
+
+ *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
+ if (*stronger < 0)
+ *stronger = TPC_INVALID;
+}
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct rs_rate *rate, enum nl80211_band band)
+{
+ int index = rate->index;
+ bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+ bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+ !vif->bss_conf.ps);
+
+ IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
+ cam, sta_ps_disabled);
+ /*
+ * allow tpc only if power management is enabled, or bt coex
+ * activity grade allows it and we are on 2.4Ghz.
+ */
+ if ((cam || sta_ps_disabled) &&
+ !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ return false;
+
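+ /* TPC is only attempted at the highest rate(s) of each
+ * modulation type, where there is little room left to upscale.
+ */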
+ IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ if (is_legacy(rate))
+ return index == IWL_RATE_54M_INDEX;
+ if (is_ht(rate))
+ return index == IWL_RATE_MCS_7_INDEX;
+ if (is_vht(rate))
+ return index == IWL_RATE_MCS_7_INDEX ||
+ index == IWL_RATE_MCS_8_INDEX ||
+ index == IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+enum tpc_action {
+ TPC_ACTION_STAY,
+ TPC_ACTION_DECREASE,
+ TPC_ACTION_INCREASE,
+ TPC_ACTION_NO_RESTRICTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ s32 sr, int weak, int strong,
+ int current_tpt,
+ int weak_tpt, int strong_tpt)
+{
+ /* stay until we have valid tpt */
+ if (current_tpt == IWL_INVALID_VALUE) {
+ IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ return TPC_ACTION_STAY;
+ }
+
+ /* Too many failures, increase txp */
+ if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
+ current_tpt == 0) {
+ IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ return TPC_ACTION_NO_RESTRICTION;
+ }
+
+ /* try decreasing first if applicable */
+ if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+ weak != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ current_tpt >= strong_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "no weak txp measurement. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+
+ if (weak_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has better tpt. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+ }
+
+ /* next, increase if needed */
+ if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+ strong != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ strong_tpt != IWL_INVALID_VALUE &&
+ current_tpt < strong_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "higher txp has better tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+
+ if (weak_tpt < current_tpt &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ strong_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has worse tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+ }
+
+ IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ return TPC_ACTION_STAY;
+}
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvm_sta->vif;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum nl80211_band band;
+ struct iwl_rate_scale_data *window;
+ struct rs_rate *rate = &tbl->rate;
+ enum tpc_action action;
+ s32 sr;
+ u8 cur = lq_sta->lq.reduced_tpc;
+ int current_tpt;
+ int weak, strong;
+ int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+ lq_sta->pers.dbg_fixed_txp_reduction);
+ lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
+ return cur != lq_sta->pers.dbg_fixed_txp_reduction;
+ }
+#endif
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!chanctx_conf))
+ band = NUM_NL80211_BANDS;
+ else
+ band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ if (!rs_tpc_allowed(mvm, vif, rate, band)) {
+ IWL_DEBUG_RATE(mvm,
+ "tpc is not allowed. remove txp restrictions\n");
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return cur != TPC_NO_REDUCTION;
+ }
+
+ rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+ /* Collect measured throughputs for current and adjacent rates */
+ window = tbl->tpc_win;
+ sr = window[cur].success_ratio;
+ current_tpt = window[cur].average_tpt;
+ if (weak != TPC_INVALID)
+ weak_tpt = window[weak].average_tpt;
+ if (strong != TPC_INVALID)
+ strong_tpt = window[strong].average_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ cur, current_tpt, sr, weak, strong,
+ weak_tpt, strong_tpt);
+
+ action = rs_get_tpc_action(mvm, sr, weak, strong,
+ current_tpt, weak_tpt, strong_tpt);
+
+ /* override actions if we are on the edge */
+ if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ } else if (strong == TPC_INVALID &&
+ (action == TPC_ACTION_INCREASE ||
+ action == TPC_ACTION_NO_RESTRICTION)) {
+ IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ }
+
+ switch (action) {
+ case TPC_ACTION_DECREASE:
+ lq_sta->lq.reduced_tpc = weak;
+ return true;
+ case TPC_ACTION_INCREASE:
+ lq_sta->lq.reduced_tpc = strong;
+ return true;
+ case TPC_ACTION_NO_RESTRICTION:
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return true;
+ case TPC_ACTION_STAY:
+ /* do nothing */
+ break;
+ }
+ return false;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ int tid, bool ndp)
+{
+ int low = IWL_RATE_INVALID;
+ int high = IWL_RATE_INVALID;
+ int index;
+ struct iwl_rate_scale_data *window = NULL;
+ int current_tpt = IWL_INVALID_VALUE;
+ int low_tpt = IWL_INVALID_VALUE;
+ int high_tpt = IWL_INVALID_VALUE;
+ u32 fail_count;
+ enum rs_action scale_action = RS_ACTION_STAY;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 prev_agg = lq_sta->is_agg;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct rs_rate *rate;
+
+ lq_sta->is_agg = !!mvmsta->agg_tids;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
+
+ if (prev_agg != lq_sta->is_agg) {
+ IWL_DEBUG_RATE(mvm,
+ "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+ prev_agg, lq_sta->is_agg);
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
+ }
+
+ /* current tx rate */
+ index = rate->index;
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = rs_get_supported_rates(lq_sta, rate);
+
+ if (!(BIT(index) & rate_mask)) {
+ IWL_ERR(mvm, "Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid*/
+ rate->type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history window for current rate */
+ if (!tbl->expected_tpt) {
+ IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
+ window = &(tbl->win[index]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = window->counter - window->success_counter;
+ if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
+ IWL_DEBUG_RATE(mvm,
+ "%s: Test Window: succ %d total %d\n",
+ rs_pretty_rate(rate),
+ window->success_counter, window->counter);
+
+ /* Can't calculate this yet; not enough history */
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ rs_stay_in_table(lq_sta, false);
+
+ return;
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (window->average_tpt > lq_sta->last_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "SWITCHING TO NEW TABLE SR: %d "
+ "cur-tpt %d old-tpt %d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = window->average_tpt;
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "GOING BACK TO THE OLD TABLE: SR %d "
+ "cur-tpt %d old-tpt %d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ rate->type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ index = tbl->rate.index;
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
+
+ sr = window->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = window->average_tpt;
+ if (low != IWL_RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != IWL_RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+ rs_pretty_rate(rate), current_tpt, sr,
+ low, high, low_tpt, high_tpt);
+
+ scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+ current_tpt, low_tpt, high_tpt);
+
+ /* Force a search in case BT doesn't like us being in MIMO */
+ if (is_mimo(rate) &&
+ !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+ IWL_DEBUG_RATE(mvm,
+ "BT Coex forbids MIMO. Search for new config\n");
+ rs_stay_in_table(lq_sta, true);
+ goto lq_update;
+ }
+
+ switch (scale_action) {
+ case RS_ACTION_DOWNSCALE:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = low;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the bottom rate. Can't decrease\n");
+ }
+
+ break;
+ case RS_ACTION_UPSCALE:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = high;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "At the top rate. Can't increase\n");
+ }
+
+ break;
+ case RS_ACTION_STAY:
+ /* No change */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
+ update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ break;
+ default:
+ break;
+ }
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq) {
+ tbl->rate.index = index;
+ if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
+ rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
+ rs_set_amsdu_len(mvm, sta, tbl, scale_action);
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+ }
+
+ rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search &&
+ lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+ && window->counter) {
+ enum rs_column next_column;
+
+ /* Save current throughput to compare with "search" throughput*/
+ lq_sta->last_tpt = current_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+ update_lq, done_search, lq_sta->rs_state,
+ window->counter);
+
+ next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+ if (next_column != RS_COLUMN_INVALID) {
+ int ret = rs_switch_to_column(mvm, lq_sta, sta,
+ next_column);
+ if (!ret)
+ lq_sta->search_better_tbl = 1;
+ } else {
+ IWL_DEBUG_RATE(mvm,
+ "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+ lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
+ }
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ rs_rate_scale_clear_tbl_windows(mvm, tbl);
+
+ /* Use new "search" start rate */
+ index = tbl->rate.index;
+
+ rs_dump_rate(mvm, &tbl->rate,
+ "Switch to SEARCH TABLE:");
+ rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+ } else {
+ done_search = 1;
+ }
+ }
+
+ if (!ndp)
+ rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta);
+
+ if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta);
+ }
+}
+
+struct rs_init_rate_info {
+ s8 rssi;
+ u8 rate_idx;
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
+ { -60, IWL_RATE_54M_INDEX },
+ { -64, IWL_RATE_48M_INDEX },
+ { -68, IWL_RATE_36M_INDEX },
+ { -80, IWL_RATE_24M_INDEX },
+ { -84, IWL_RATE_18M_INDEX },
+ { -85, IWL_RATE_12M_INDEX },
+ { -86, IWL_RATE_11M_INDEX },
+ { -88, IWL_RATE_5M_INDEX },
+ { -90, IWL_RATE_2M_INDEX },
+ { S8_MIN, IWL_RATE_1M_INDEX },
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
+ { -60, IWL_RATE_54M_INDEX },
+ { -64, IWL_RATE_48M_INDEX },
+ { -72, IWL_RATE_36M_INDEX },
+ { -80, IWL_RATE_24M_INDEX },
+ { -84, IWL_RATE_18M_INDEX },
+ { -85, IWL_RATE_12M_INDEX },
+ { -87, IWL_RATE_9M_INDEX },
+ { S8_MIN, IWL_RATE_6M_INDEX },
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+ { -60, IWL_RATE_MCS_7_INDEX },
+ { -64, IWL_RATE_MCS_6_INDEX },
+ { -68, IWL_RATE_MCS_5_INDEX },
+ { -72, IWL_RATE_MCS_4_INDEX },
+ { -80, IWL_RATE_MCS_3_INDEX },
+ { -84, IWL_RATE_MCS_2_INDEX },
+ { -85, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* MCS index 9 is not valid for 20MHz VHT channel width,
+ * but is ok for 40, 80 and 160MHz channels.
+ */
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+ { -60, IWL_RATE_MCS_8_INDEX },
+ { -64, IWL_RATE_MCS_7_INDEX },
+ { -68, IWL_RATE_MCS_6_INDEX },
+ { -72, IWL_RATE_MCS_5_INDEX },
+ { -80, IWL_RATE_MCS_4_INDEX },
+ { -84, IWL_RATE_MCS_3_INDEX },
+ { -85, IWL_RATE_MCS_2_INDEX },
+ { -87, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht[] = {
+ { -60, IWL_RATE_MCS_9_INDEX },
+ { -64, IWL_RATE_MCS_8_INDEX },
+ { -68, IWL_RATE_MCS_7_INDEX },
+ { -72, IWL_RATE_MCS_6_INDEX },
+ { -80, IWL_RATE_MCS_5_INDEX },
+ { -84, IWL_RATE_MCS_4_INDEX },
+ { -85, IWL_RATE_MCS_3_INDEX },
+ { -87, IWL_RATE_MCS_2_INDEX },
+ { -88, IWL_RATE_MCS_1_INDEX },
+ { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+#define IWL_RS_LOW_RSSI_THRESHOLD (-76) /* dBm */
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+
+ if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+ else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+ rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+ else if (lq_sta->band == NL80211_BAND_5GHZ)
+ rate->type = LQ_LEGACY_A;
+ else
+ rate->type = LQ_LEGACY_G;
+
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+ /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+ if (is_mimo(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+ } else if (is_siso(rate)) {
+ lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+ } else {
+ lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+ if (lq_sta->band == NL80211_BAND_5GHZ) {
+ lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+ }
+ }
+
+ if (is_vht(rate)) {
+ if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+ lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+ } else {
+ lq_sta->optimal_rates = rs_optimal_rates_vht;
+ lq_sta->optimal_nentries =
+ ARRAY_SIZE(rs_optimal_rates_vht);
+ }
+ } else if (is_ht(rate)) {
+ lq_sta->optimal_rates = rs_optimal_rates_ht;
+ lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+ }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct rs_rate *rate = &lq_sta->optimal_rate;
+ int i;
+
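+ /* Default to the lowest allowed rate in case the last RSSI is
+ * below every threshold in the optimal rates table.
+ */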
+ rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+ BITS_PER_LONG);
+
+ for (i = 0; i < lq_sta->optimal_nentries; i++) {
+ int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+ if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+ (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+ rate->index = rate_idx;
+ break;
+ }
+ }
+
+ return rate;
+}
+
+/* Choose an initial legacy rate and antenna to use based on the RSSI
+ * of last Rx
+ */
+static void rs_get_initial_rate(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ enum nl80211_band band,
+ struct rs_rate *rate)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i, nentries;
+ unsigned long active_rate;
+ s8 best_rssi = S8_MIN;
+ u8 best_ant = ANT_NONE;
+ u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+ const struct rs_init_rate_info *initial_rates;
+
+ for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+ if (!(lq_sta->pers.chains & BIT(i)))
+ continue;
+
+ if (lq_sta->pers.chain_signal[i] > best_rssi) {
+ best_rssi = lq_sta->pers.chain_signal[i];
+ best_ant = BIT(i);
+ }
+ }
+
+ IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
+ rs_pretty_ant(best_ant), best_rssi);
+
+ if (best_ant != ANT_A && best_ant != ANT_B)
+ rate->ant = first_antenna(valid_tx_ant);
+ else
+ rate->ant = best_ant;
+
+ rate->sgi = false;
+ rate->ldpc = false;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+ rate->index = find_first_bit(&lq_sta->active_legacy_rate,
+ BITS_PER_LONG);
+
+ if (band == NL80211_BAND_5GHZ) {
+ rate->type = LQ_LEGACY_A;
+ initial_rates = rs_optimal_rates_5ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+ } else {
+ rate->type = LQ_LEGACY_G;
+ initial_rates = rs_optimal_rates_24ghz_legacy;
+ nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+ }
+
+ if (!IWL_MVM_RS_RSSI_BASED_INIT_RATE)
+ goto out;
+
+ /* Start from a higher rate if the corresponding debug capability
+ * is enabled. The rate is chosen according to AP capabilities.
+ * In case of VHT/HT when the rssi is low fallback to the case of
+ * legacy rates.
+ */
+ if (sta->vht_cap.vht_supported &&
+ best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
+ /*
+ * In AP mode, when a new station associates, rs is initialized
+ * immediately upon association completion, before the phy
+ * context is updated with the association parameters, so the
+ * sta bandwidth might be wider than the phy context allows.
+ * To avoid this issue, always initialize rs with 20mhz
+ * bandwidth rate, and after authorization, when the phy context
+ * is already up-to-date, re-init rs with the correct bw.
+ */
+ u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ?
+ RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+ switch (bw) {
+ case RATE_MCS_CHAN_WIDTH_40:
+ case RATE_MCS_CHAN_WIDTH_80:
+ case RATE_MCS_CHAN_WIDTH_160:
+ initial_rates = rs_optimal_rates_vht;
+ nentries = ARRAY_SIZE(rs_optimal_rates_vht);
+ break;
+ case RATE_MCS_CHAN_WIDTH_20:
+ initial_rates = rs_optimal_rates_vht_20mhz;
+ nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+ break;
+ default:
+ IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
+ goto out;
+ }
+
+ active_rate = lq_sta->active_siso_rate;
+ rate->type = LQ_VHT_SISO;
+ rate->bw = bw;
+ } else if (sta->ht_cap.ht_supported &&
+ best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
+ initial_rates = rs_optimal_rates_ht;
+ nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+ active_rate = lq_sta->active_siso_rate;
+ rate->type = LQ_HT_SISO;
+ } else {
+ active_rate = lq_sta->active_legacy_rate;
+ }
+
+ for (i = 0; i < nentries; i++) {
+ int rate_idx = initial_rates[i].rate_idx;
+
+ if ((best_rssi >= initial_rates[i].rssi) &&
+ (BIT(rate_idx) & active_rate)) {
+ rate->index = rate_idx;
+ break;
+ }
+ }
+
+out:
+ rs_dump_rate(mvm, rate, "INITIAL");
+}
+
+/* Save info about RSSI of last Rx */
+void rs_update_last_rssi(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+ int i;
+
+ lq_sta->pers.chains = rx_status->chains;
+ lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
+ lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
+ lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+ lq_sta->pers.last_rssi = S8_MIN;
+
+ for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+ if (!(lq_sta->pers.chains & BIT(i)))
+ continue;
+
+ if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+ lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+ }
+}
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ enum nl80211_band band, bool update)
+{
+ struct iwl_scale_tbl_info *tbl;
+ struct rs_rate *rate;
+ u8 active_tbl = 0;
+
+ if (!sta || !lq_sta)
+ return;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ rate = &tbl->rate;
+
+ rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+ rs_init_optimal_rate(mvm, sta, lq_sta);
+
+ WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
+ "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n",
+ rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant,
+ mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID);
+
+ tbl->column = rs_get_column_from_rate(rate);
+
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+ /* TODO restore station should remember the lq cmd */
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
+}
+
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ void *mvm_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_lq_sta *lq_sta;
+ struct rs_rate *optimal_rate;
+ u32 last_ucode_rate;
+
+ if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+ /* if vif isn't initialized mvm doesn't know about
+ * this station, so don't do anything with it
+ */
+ sta = NULL;
+ mvm_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, mvm_sta, txrc))
+ return;
+
+ if (!mvm_sta)
+ return;
+
+ lq_sta = mvm_sta;
+ iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+ info->band, &info->control.rates[0]);
+ info->control.rates[0].count = 1;
+
+ /* Report the optimal rate based on rssi and STA caps if we haven't
+ * converged yet (too little traffic) or exploring other modulations
+ */
+ if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+ optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+ last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+ optimal_rate);
+ iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+ &txrc->reported_rate);
+ }
+}
+
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ lq_sta->pers.drv = mvm;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->pers.dbg_fixed_rate = 0;
+ lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+ lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
+#endif
+ lq_sta->pers.chains = 0;
+ memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
+
+ return lq_sta;
+}
+
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
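+ /* Each NSS owns a 2-bit field in rx_mcs_map; extract it to get
+ * the IEEE80211_VHT_MCS_SUPPORT_0_x value for this NSS.
+ */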
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+ return IWL_RATE_MCS_7_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+ return IWL_RATE_MCS_8_INDEX;
+ else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+ return IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ return -1;
+}
+
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_lq_sta *lq_sta)
+{
+ int i;
+ int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_siso_rate |= BIT(i);
+ }
+ }
+
+ if (sta->rx_nss < 2)
+ return;
+
+ highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+ if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+ for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+ if (i == IWL_RATE_9M_INDEX)
+ continue;
+
+ /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+ if (i == IWL_RATE_MCS_9_INDEX &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ continue;
+
+ lq_sta->active_mimo2_rate |= BIT(i);
+ }
+ }
+}
+
+static void rs_ht_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ /* active_siso_rate mask includes 9 MBits (bit 5),
+ * and CCK (bits 0-3), supp_rates[] does not;
+ * shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ if (mvm->cfg->ht_params->ldpc &&
+ (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
+ lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
+ lq_sta->stbc_capable = true;
+
+ lq_sta->is_vht = false;
+}
+
+static void rs_vht_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
+
+ if (mvm->cfg->ht_params->ldpc &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
+ lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
+ lq_sta->stbc_capable = true;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+ lq_sta->bfer_capable = true;
+
+ lq_sta->is_vht = true;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+{
+ spin_lock_bh(&mvm->drv_stats_lock);
+ memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
+ spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
+{
+ u8 nss = 0;
+
+ spin_lock(&mvm->drv_stats_lock);
+
+ if (agg)
+ mvm->drv_rx_stats.agg_frames++;
+
+ mvm->drv_rx_stats.success_frames++;
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ mvm->drv_rx_stats.bw_20_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ mvm->drv_rx_stats.bw_40_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ mvm->drv_rx_stats.bw_80_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ mvm->drv_rx_stats.bw_160_frames++;
+ break;
+ default:
+ WARN_ONCE(1, "bad BW. rate 0x%x", rate);
+ }
+
+ if (rate & RATE_MCS_HT_MSK) {
+ mvm->drv_rx_stats.ht_frames++;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_VHT_MSK) {
+ mvm->drv_rx_stats.vht_frames++;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ } else {
+ mvm->drv_rx_stats.legacy_frames++;
+ }
+
+ if (nss == 1)
+ mvm->drv_rx_stats.siso_frames++;
+ else if (nss == 2)
+ mvm->drv_rx_stats.mimo2_frames++;
+
+ if (rate & RATE_MCS_SGI_MSK)
+ mvm->drv_rx_stats.sgi_frames++;
+ else
+ mvm->drv_rx_stats.ngi_frames++;
+
+ mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
+ mvm->drv_rx_stats.last_frame_idx =
+ (mvm->drv_rx_stats.last_frame_idx + 1) %
+ ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
+
+ spin_unlock(&mvm->drv_stats_lock);
+}
+#endif
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool update)
+{
+ int i, j;
+ struct ieee80211_hw *hw = mvm->hw;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+ struct ieee80211_supported_band *sband;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+ /* clear all non-persistent lq data */
+ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+ sband = hw->wiphy->bands[band];
+
+ lq_sta->lq.sta_id = mvmsta->sta_id;
+ mvmsta->amsdu_enabled = 0;
+ mvmsta->max_amsdu_len = sta->max_amsdu_len;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->last_tx = jiffies;
+
+ IWL_DEBUG_RATE(mvm,
+ "LQ: *** rate scale station global init for station %d ***\n",
+ mvmsta->sta_id);
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
+ lq_sta->band = sband->band;
+ /*
+ * active legacy rates as per supported rates bitmap
+ */
+ supp = sta->supp_rates[sband->band];
+ lq_sta->active_legacy_rate = 0;
+ for_each_set_bit(i, &supp, BITS_PER_LONG)
+ lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+ /* TODO: should probably account for rx_highest for both HT/VHT */
+ if (!vht_cap || !vht_cap->vht_supported)
+ rs_ht_init(mvm, sta, lq_sta, ht_cap);
+ else
+ rs_vht_init(mvm, sta, lq_sta, vht_cap);
+
+ lq_sta->max_legacy_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
+ lq_sta->max_siso_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
+ lq_sta->max_mimo2_rate_idx =
+ rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
+
+ IWL_DEBUG_RATE(mvm,
+ "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
+ lq_sta->active_legacy_rate,
+ lq_sta->active_siso_rate,
+ lq_sta->active_mimo2_rate,
+ lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
+ lq_sta->bfer_capable);
+ IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
+ lq_sta->max_legacy_rate_idx,
+ lq_sta->max_siso_rate_idx,
+ lq_sta->max_mimo2_rate_idx);
+
+ /* These values will be overridden later */
+ lq_sta->lq.single_stream_ant_msk =
+ first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+ lq_sta->lq.dual_stream_ant_msk = ANT_AB;
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+ lq_sta->is_agg = 0;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+#endif
+ rs_initialize_lq(mvm, sta, lq_sta, band, update);
+}
+
+static void rs_drv_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta,
+ void *priv_sta, u32 changed)
+{
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 tid;
+
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
+ /* Stop any ongoing aggregations as rs starts off assuming no agg */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_stop_tx_ba_session(sta, tid);
+
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+ struct iwl_lq_cmd *lq_cmd,
+ enum nl80211_band band,
+ u32 ucode_rate)
+{
+ struct rs_rate rate;
+ int i;
+ int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+ __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+ u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+ for (i = 0; i < num_rates; i++)
+ lq_cmd->rs_table[i] = ucode_rate_le32;
+
+ if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (is_mimo(&rate))
+ lq_cmd->mimo_delim = num_rates - 1;
+ else
+ lq_cmd->mimo_delim = 0;
+
+ lq_cmd->reduced_tpc = 0;
+
+ if (num_of_ant(ant) == 1)
+ lq_cmd->single_stream_ant_msk = ant;
+
+ if (!mvm->trans->cfg->gen2)
+ lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ else
+ lq_cmd->agg_frame_cnt_limit =
+ LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
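+/* Fill up to @num_rates entries of @rs_table, @num_retries tries each,
+ * stepping down the current column from @rate after every rate; when
+ * @toggle_ant is set, the Tx antenna is toggled between entries.
+ */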
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct rs_rate *rate,
+ __le32 *rs_table, int *rs_table_index,
+ int num_rates, int num_retries,
+ u8 valid_tx_ant, bool toggle_ant)
+{
+ int i, j;
+ __le32 ucode_rate;
+ bool bottom_reached = false;
+ int prev_rate_idx = rate->index;
+ int end = LINK_QUAL_MAX_RETRY_NUM;
+ int index = *rs_table_index;
+
+ for (i = 0; i < num_rates && index < end; i++) {
+ for (j = 0; j < num_retries && index < end; j++, index++) {
+ ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
+ rate));
+ rs_table[index] = ucode_rate;
+ if (toggle_ant)
+ rs_toggle_antenna(valid_tx_ant, rate);
+ }
+
+ prev_rate_idx = rate->index;
+ bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+ if (bottom_reached && !is_legacy(rate))
+ break;
+ }
+
+ if (!bottom_reached && !is_legacy(rate))
+ rate->index = prev_rate_idx;
+
+ *rs_table_index = index;
+}
+
+/* Building the rate table is non-trivial. When we're in the
+ * MIMO2/VHT/80MHz/SGI column, the rate table should look like this:
+ *
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+ */
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct rs_rate rate;
+ int num_rates, num_retries, index = 0;
+ u8 valid_tx_ant = 0;
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ bool toggle_ant = false;
+ u32 color;
+
+ memcpy(&rate, initial_rate, sizeof(rate));
+
+ valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+ /* TODO: remove old API when min FW API hits 14 */
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+ rs_stbc_allow(mvm, sta, lq_sta))
+ rate.stbc = true;
+
+ if (is_siso(&rate)) {
+ num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
+ num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
+ } else if (is_mimo(&rate)) {
+ num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
+ num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
+ } else {
+ num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
+ toggle_ant = true;
+ }
+
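+ /* Fill the table top-down: first the initial column, then (after
+ * stepping down via rs_get_lower_rate_down_column()) the next column,
+ * and finally legacy rates as the last resort.
+ */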
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
+
+ rs_get_lower_rate_down_column(lq_sta, &rate);
+
+ if (is_siso(&rate)) {
+ num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
+ lq_cmd->mimo_delim = index;
+ } else if (is_legacy(&rate)) {
+ num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ toggle_ant = true;
+
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
+
+ rs_get_lower_rate_down_column(lq_sta, &rate);
+
+ num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+ num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
+
+ rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+ num_rates, num_retries, valid_tx_ant,
+ toggle_ant);
+
+ /* update the color of the LQ command (as a counter at bits 1-3) */
+ color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+ lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
+}
+
+struct rs_bfer_active_iter_data {
+ struct ieee80211_sta *exclude_sta;
+ struct iwl_mvm_sta *bfer_mvmsta;
+};
+
+static void rs_bfer_active_iter(void *_data,
+ struct ieee80211_sta *sta)
+{
+ struct rs_bfer_active_iter_data *data = _data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
+ u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
+
+ if (sta == data->exclude_sta)
+ return;
+
+ /* The current sta has BFER allowed */
+ if (ss_params & LQ_SS_BFER_ALLOWED) {
+ WARN_ON_ONCE(data->bfer_mvmsta != NULL);
+
+ data->bfer_mvmsta = mvmsta;
+ }
+}
+
+static int rs_bfer_priority(struct iwl_mvm_sta *sta)
+{
+ int prio = -1;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
+
+ switch (viftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ prio = 3;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ prio = 2;
+ break;
+ case NL80211_IFTYPE_STATION:
+ prio = 1;
+ break;
+ default:
+ WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
+ prio = -1;
+ }
+
+ return prio;
+}
+
+/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
+static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
+ struct iwl_mvm_sta *sta2)
+{
+ int prio1 = rs_bfer_priority(sta1);
+ int prio2 = rs_bfer_priority(sta2);
+
+ if (prio1 > prio2)
+ return 1;
+ if (prio1 < prio2)
+ return -1;
+ return 0;
+}
+
+static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct rs_bfer_active_iter_data data = {
+ .exclude_sta = sta,
+ .bfer_mvmsta = NULL,
+ };
+ struct iwl_mvm_sta *bfer_mvmsta = NULL;
+ u32 ss_params = LQ_SS_PARAMS_VALID;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ goto out;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* Check if forcing the decision is configured.
+ * Note that SISO is forced by not allowing STBC or BFER
+ */
+ if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
+ ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
+ else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
+ ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
+
+ if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
+ IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
+ lq_sta->pers.ss_force);
+ goto out;
+ }
+#endif
+
+ if (lq_sta->stbc_capable)
+ ss_params |= LQ_SS_STBC_1SS_ALLOWED;
+
+ if (!lq_sta->bfer_capable)
+ goto out;
+
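+ /* Only a single station may have BFER allowed at a time; find out
+ * whether another station currently holds it.
+ */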
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ rs_bfer_active_iter,
+ &data);
+ bfer_mvmsta = data.bfer_mvmsta;
+
+ /* This code is safe as it doesn't run concurrently for different
+ * stations. This is guaranteed by the fact that calls to
+ * ieee80211_tx_status don't run concurrently for a single HW.
+ */
+ if (!bfer_mvmsta) {
+ IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
+
+ ss_params |= LQ_SS_BFER_ALLOWED;
+ goto out;
+ }
+
+ IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
+ bfer_mvmsta->sta_id);
+
+ /* Disallow BFER on another STA if active and we're a higher priority */
+ if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
+ struct iwl_lq_cmd *bfersta_lq_cmd =
+ &bfer_mvmsta->lq_sta.rs_drv.lq;
+ u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
+
+ bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
+ bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
+ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+
+ ss_params |= LQ_SS_BFER_ALLOWED;
+ IWL_DEBUG_RATE(mvm,
+ "Lower priority BFER sta found (%d). Switch BFER\n",
+ bfer_mvmsta->sta_id);
+ }
+out:
+ lq_cmd->ss_params = cpu_to_le32(ss_params);
+}
+
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ const struct rs_rate *initial_rate)
+{
+ struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+
+ lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->pers.dbg_fixed_rate) {
+ rs_build_rates_table_from_fixed(mvm, lq_cmd,
+ lq_sta->band,
+ lq_sta->pers.dbg_fixed_rate);
+ return;
+ }
+#endif
+ if (WARN_ON_ONCE(!sta || !initial_rate))
+ return;
+
+ rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
+
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+ rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (num_of_ant(initial_rate->ant) == 1)
+ lq_cmd->single_stream_ant_msk = initial_rate->ant;
+
+ lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+ /*
+ * In case of low latency, tell the firmware to leave a frame in the
+ * Tx Fifo so that it can start a transaction in the same TxOP. This
+ * basically allows the firmware to send bursts.
+ */
+ if (iwl_mvm_vif_low_latency(mvmvif))
+ lq_cmd->agg_frame_cnt_limit--;
+
+ if (mvmsta->vif->p2p)
+ lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK;
+
+ lq_cmd->agg_time_limit =
+ cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
+}
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* the rate scaling framework requires a free function to be implemented */
+static void rs_free(void *mvm_rate)
+{
+}
+
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
+{
+ struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+ IWL_DEBUG_RATE(mvm, "enter\n");
+ IWL_DEBUG_RATE(mvm, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
+{
+ char *type, *bw;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+ if (!(rate & RATE_MCS_HT_MSK) &&
+ !(rate & RATE_MCS_VHT_MSK) &&
+ !(rate & RATE_MCS_HE_MSK)) {
+ int index = iwl_hwrate_to_plcp_idx(rate);
+
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs[index].mbps);
+ }
+
+ if (rate & RATE_MCS_VHT_MSK) {
+ type = "VHT";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HT_MSK) {
+ type = "HT";
+ mcs = rate & RATE_HT_MCS_INDEX_MSK;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK)
+ >> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HE_MSK) {
+ type = "HE";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
+ } else {
+ type = "Unknown"; /* shouldn't happen */
+ }
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ bw = "20Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ bw = "40Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ bw = "80Mhz";
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ bw = "160Mhz";
+ break;
+ default:
+ bw = "BAD BW";
+ }
+
+ return scnprintf(buf, bufsz,
+ "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+}
+
+/*
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only; once the device starts using the
+ * fixed rate, the module needs to be reloaded to bring back normal
+ * operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta)
+{
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
+
+ if (lq_sta->pers.dbg_fixed_rate) {
+ rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
+ iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
+ }
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm *mvm;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+
+ mvm = lq_sta->pers.drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
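+ /* a hex rate_n_flags value forces that rate; any other input clears
+ * the debug fixed rate
+ */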
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->pers.dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->pers.dbg_fixed_rate = 0;
+
+ rs_program_fix_rate(mvm, lq_sta);
+
+ return count;
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ ssize_t ret;
+ static const size_t bufsz = 2048;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta =
+ container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
+ struct iwl_mvm *mvm;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct rs_rate *rate = &tbl->rate;
+ u32 ss_params;
+
+ mvm = lq_sta->pers.drv;
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "sta_id %d\n", lq_sta->lq.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "failed=%d success=%d rate=0%lX\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
+ if (!is_legacy(rate)) {
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
+ (is_siso(rate)) ? "SISO" : "MIMO2");
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
+ (is_ht20(rate)) ? "20MHz" :
+ (is_ht40(rate)) ? "40MHz" :
+ (is_ht80(rate)) ? "80MHz" :
+ (is_ht160(rate)) ? "160MHz" : "BAD BW");
+ desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
+ (rate->sgi) ? "SGI" : "NGI",
+ (rate->ldpc) ? "LDPC" : "BCC",
+ (lq_sta->is_agg) ? "AGG on" : "",
+ (mvmsta->amsdu_enabled) ? "AMSDU on" : "");
+ }
+ desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.flags,
+ lq_sta->lq.mimo_delim,
+ lq_sta->lq.single_stream_ant_msk,
+ lq_sta->lq.dual_stream_ant_msk);
+
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_time_limit),
+ lq_sta->lq.agg_disable_start_th,
+ lq_sta->lq.agg_frame_cnt_limit);
+
+ desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+ lq_sta->lq.reduced_tpc);
+ ss_params = le32_to_cpu(lq_sta->lq.ss_params);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "single stream params: %s%s%s%s\n",
+ (ss_params & LQ_SS_PARAMS_VALID) ?
+ "VALID" : "INVALID",
+ (ss_params & LQ_SS_BFER_ALLOWED) ?
+ ", BFER" : "",
+ (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
+ ", STBC" : "",
+ (ss_params & LQ_SS_FORCE) ?
+ ", FORCE" : "");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.initial_rate_index[0],
+ lq_sta->lq.initial_rate_index[1],
+ lq_sta->lq.initial_rate_index[2],
+ lq_sta->lq.initial_rate_index[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+
+ desc += scnprintf(buff + desc, bufsz - desc,
+ " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = rs_sta_dbgfs_scale_table_write,
+ .read = rs_sta_dbgfs_scale_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+ struct iwl_scale_tbl_info *tbl;
+ struct rs_rate *rate;
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ tbl = &(lq_sta->lq_info[i]);
+ rate = &tbl->rate;
+ desc += sprintf(buff+desc,
+ "%s type=%d SGI=%d BW=%s DUP=0\n"
+ "index=%d\n",
+ lq_sta->active_tbl == i ? "*" : "x",
+ rate->type,
+ rate->sgi,
+ is_ht20(rate) ? "20MHz" :
+ is_ht40(rate) ? "40MHz" :
+ is_ht80(rate) ? "80MHz" :
+ is_ht160(rate) ? "160MHz" : "ERR",
+ rate->index);
+ for (j = 0; j < IWL_RATE_COUNT; j++) {
+ desc += sprintf(buff+desc,
+ "counter=%d success=%d %%=%d\n",
+ tbl->win[j].counter,
+ tbl->win[j].success_counter,
+ tbl->win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = rs_sta_dbgfs_stats_table_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char * const column_name[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ [RS_COLUMN_MIMO2] = "MIMO2",
+ [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ };
+
+ static const char * const rate_name[] = {
+ [IWL_RATE_1M_INDEX] = "1M",
+ [IWL_RATE_2M_INDEX] = "2M",
+ [IWL_RATE_5M_INDEX] = "5.5M",
+ [IWL_RATE_11M_INDEX] = "11M",
+ [IWL_RATE_6M_INDEX] = "6M|MCS0",
+ [IWL_RATE_9M_INDEX] = "9M",
+ [IWL_RATE_12M_INDEX] = "12M|MCS1",
+ [IWL_RATE_18M_INDEX] = "18M|MCS2",
+ [IWL_RATE_24M_INDEX] = "24M|MCS3",
+ [IWL_RATE_36M_INDEX] = "36M|MCS4",
+ [IWL_RATE_48M_INDEX] = "48M|MCS5",
+ [IWL_RATE_54M_INDEX] = "54M|MCS6",
+ [IWL_RATE_MCS_7_INDEX] = "MCS7",
+ [IWL_RATE_MCS_8_INDEX] = "MCS8",
+ [IWL_RATE_MCS_9_INDEX] = "MCS9",
+ [IWL_RATE_MCS_10_INDEX] = "MCS10",
+ [IWL_RATE_MCS_11_INDEX] = "MCS11",
+ };
+
+ char *buff, *pos, *endpos;
+ int col, rate;
+ ssize_t ret;
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct rs_rate_stats *stats;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ pos += scnprintf(pos, endpos - pos, "\n");
+
+ for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ pos += scnprintf(pos, endpos - pos,
+ "%s,", column_name[col]);
+
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ stats = &(lq_sta->pers.tx_stats[col][rate]);
+ pos += scnprintf(pos, endpos - pos,
+ "%llu/%llu,",
+ stats->success,
+ stats->total);
+ }
+ pos += scnprintf(pos, endpos - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+ return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats));
+
+ return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ .read = rs_sta_dbgfs_drv_tx_stats_read,
+ .write = rs_sta_dbgfs_drv_tx_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ char buf[12];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ static const char * const ss_force_name[] = {
+ [RS_SS_FORCE_NONE] = "none",
+ [RS_SS_FORCE_STBC] = "stbc",
+ [RS_SS_FORCE_BFER] = "bfer",
+ [RS_SS_FORCE_SISO] = "siso",
+ };
+
+ pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
+ ss_force_name[lq_sta->pers.ss_force]);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+ int ret = 0;
+
+ if (!strncmp("none", buf, 4)) {
+ lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
+ } else if (!strncmp("siso", buf, 4)) {
+ lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
+ } else if (!strncmp("stbc", buf, 4)) {
+ if (lq_sta->stbc_capable) {
+ lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
+ } else {
+ IWL_ERR(mvm,
+ "can't force STBC. peer doesn't support\n");
+ ret = -EINVAL;
+ }
+ } else if (!strncmp("bfer", buf, 4)) {
+ if (lq_sta->bfer_capable) {
+ lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
+ } else {
+ IWL_ERR(mvm,
+ "can't force BFER. peer doesn't support\n");
+ ret = -EINVAL;
+ }
+ } else {
+ IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
+ ret = -EINVAL;
+ }
+ return ret ?: count;
+}
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
+#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, lq_sta, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
+
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+ struct dentry *dir)
+{
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
+
+ if (!mvmsta->vif)
+ return;
+
+ debugfs_create_file("rate_scale_table", 0600, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ debugfs_create_file("rate_stats_table", 0400, dir,
+ lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ debugfs_create_file("drv_tx_stats", 0600, dir,
+ lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
+ debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+ &lq_sta->tx_agg_tid_en);
+ debugfs_create_u8("reduced_tpc", 0600, dir,
+ &lq_sta->pers.dbg_fixed_txp_reduction);
+
+ MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
+ return;
+err:
+ IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
+}
+
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
+{
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void rs_rate_init_ops(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *mvm_sta)
+{
+}
+
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
+ .name = RS_NAME,
+ .tx_status = rs_drv_mac80211_tx_status,
+ .get_rate = rs_drv_get_rate,
+ .rate_init = rs_rate_init_ops,
+ .alloc = rs_alloc,
+ .free = rs_free,
+ .alloc_sta = rs_drv_alloc_sta,
+ .free_sta = rs_free_sta,
+ .rate_update = rs_drv_rate_update,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = rs_drv_add_sta_debugfs,
+ .remove_sta_debugfs = rs_remove_sta_debugfs,
+#endif
+};
+
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool update)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ rs_fw_rate_init(mvm, sta, band, update);
+ else
+ rs_drv_rate_init(mvm, sta, band, update);
+}
+
+int iwl_mvm_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_mvm_ops_drv);
+}
+
+void iwl_mvm_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
+}
+
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
+
+ lockdep_assert_held(&mvm->mutex);
+
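+ /* tx_protection is a reference count: RTS/CTS is turned on for the
+ * first user and turned off again only when the last user is gone
+ */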
+ if (enable) {
+ if (mvmsta->tx_protection == 0)
+ lq->flags |= LQ_FLAG_USE_RTS_MSK;
+ mvmsta->tx_protection++;
+ } else {
+ mvmsta->tx_protection--;
+ if (mvmsta->tx_protection == 0)
+ lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
+ }
+
+ return iwl_mvm_send_lq_cmd(mvm, lq, false);
+}
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection
+ * @mvm: The mvm object
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ return rs_fw_tx_protection(mvm, mvmsta, enable);
+ else
+ return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
new file mode 100644
index 000000000..d0f47899f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -0,0 +1,469 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __rs_h__
+#define __rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "fw-api.h"
+#include "iwl-trans.h"
+
+#define RS_NAME "iwl-mvm-rs"
+
+struct iwl_rs_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_vht_siso;
+ u8 plcp_vht_mimo2;
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+};
+
+#define IWL_RATE_60M_PLCP 3
+
+enum {
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
+#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
+#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
+#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
+#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
+#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
+#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
+#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
+#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
+#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
+#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
+#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for HT/VHT bit rates */
+enum {
+ IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+ IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+ IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+ IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+ IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+ IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+ IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+ IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+ IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+ IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+ IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+ IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+ IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+ IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+ IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+ IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+ IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+ IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+ IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+ IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+ IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+ IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+ IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+ IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+ IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+ IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+ IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+ IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+ IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+ IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+};
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE -1
+
+#define TPC_MAX_REDUCTION 15
+#define TPC_NO_REDUCTION 0
+#define TPC_INVALID 0xff
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD 0
+#define IWL_AGG_ALL_TID 0xff
+
+enum iwl_table_type {
+ LQ_NONE,
+ LQ_LEGACY_G, /* legacy types */
+ LQ_LEGACY_A,
+ LQ_HT_SISO, /* HT types */
+ LQ_HT_MIMO2,
+ LQ_VHT_SISO, /* VHT types */
+ LQ_VHT_MIMO2,
+ LQ_HE_SISO, /* HE types */
+ LQ_HE_MIMO2,
+ LQ_MAX,
+};
+
+struct rs_rate {
+ int index;
+ enum iwl_table_type type;
+ u8 ant;
+ u32 bw;
+ bool sgi;
+ bool ldpc;
+ bool stbc;
+ bool bfer;
+};
+
+#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
+ ((type) == LQ_LEGACY_A))
+#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
+#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
+#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
+#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+ is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+ is_type_vht_mimo2(type) || is_type_he_mimo2(type))
+#define is_type_mimo(type) (is_type_mimo2(type))
+#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
+#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
+#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
+#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
+
+#define is_legacy(rate) is_type_legacy((rate)->type)
+#define is_ht_siso(rate) is_type_ht_siso((rate)->type)
+#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type)
+#define is_vht_siso(rate) is_type_vht_siso((rate)->type)
+#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type)
+#define is_siso(rate) is_type_siso((rate)->type)
+#define is_mimo2(rate) is_type_mimo2((rate)->type)
+#define is_mimo(rate) is_type_mimo((rate)->type)
+#define is_ht(rate) is_type_ht((rate)->type)
+#define is_vht(rate) is_type_vht((rate)->type)
+#define is_he(rate) is_type_he((rate)->type)
+#define is_a_band(rate) is_type_a_band((rate)->type)
+#define is_g_band(rate) is_type_g_band((rate)->type)
+
+#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
+#define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+#ifdef CONFIG_MAC80211_DEBUGFS
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+#endif
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+struct iwl_lq_sta_rs_fw {
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+
+ /* persistent fields - initialized only once - keep last! */
+ struct lq_sta_pers_rs_fw {
+ u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ u32 dbg_fixed_rate;
+ u16 dbg_agg_frame_count_lim;
+#endif
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
+ struct iwl_mvm *drv;
+ } pers;
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+};
+
+/* Possible Tx columns
+ * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
+ */
+enum rs_column {
+ RS_COLUMN_LEGACY_ANT_A = 0,
+ RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2,
+ RS_COLUMN_MIMO2_SGI,
+
+ RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
+ RS_COLUMN_INVALID,
+};
+
+enum rs_ss_force_opt {
+ RS_SS_FORCE_NONE = 0,
+ RS_SS_FORCE_STBC,
+ RS_SS_FORCE_BFER,
+ RS_SS_FORCE_SISO,
+};
+
+/* Packet stats per rate */
+struct rs_rate_stats {
+ u64 success;
+ u64 total;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+ struct rs_rate rate;
+ enum rs_column column;
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+ /* per txpower-reduction history */
+ struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
+};
+
+enum {
+ RS_STATE_SEARCH_CYCLE_STARTED,
+ RS_STATE_SEARCH_CYCLE_ENDED,
+ RS_STATE_STAY_IN_COLUMN,
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+ u8 active_tbl; /* index of active table, range 0-1 */
+ u8 rs_state; /* RS_STATE_* */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u32 visited_columns; /* Bitmask marking which Tx columns were
+ * explored during a search cycle
+ */
+ u64 last_tx;
+ bool is_vht;
+ bool ldpc; /* LDPC Rx is supported by the STA */
+ bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
+ bool bfer_capable; /* Remote supports beamformee and we BFer */
+
+ enum nl80211_band band;
+
+ /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+ unsigned long active_legacy_rate;
+ unsigned long active_siso_rate;
+ unsigned long active_mimo2_rate;
+
+ /* Highest rate per Tx mode */
+ u8 max_legacy_rate_idx;
+ u8 max_siso_rate_idx;
+ u8 max_mimo2_rate_idx;
+
+ /* Optimal rate based on RSSI and STA caps.
+ * Used only to reflect link speed to userspace.
+ */
+ struct rs_rate optimal_rate;
+ unsigned long optimal_rate_mask;
+ const struct rs_init_rate_info *optimal_rates;
+ int optimal_nentries;
+
+ u8 missed_rate_counter;
+
+ struct iwl_lq_cmd lq;
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ u8 tx_agg_tid_en;
+
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+
+ /* tx power reduce for this sta */
+ int tpc_reduce;
+
+ /* persistent fields - initialized only once - keep last! */
+ struct lq_sta_pers {
+#ifdef CONFIG_MAC80211_DEBUGFS
+ u32 dbg_fixed_rate;
+ u8 dbg_fixed_txp_reduction;
+
+ /* force STBC/BFER/SISO for testing */
+ enum rs_ss_force_opt ss_force;
+#endif
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
+ struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+ struct iwl_mvm *drv;
+ } pers;
+};
+
+/* ieee80211_tx_info's status_driver_data[0] is packed with the LQ color and
+ * the reduced tx power. Note: this is the iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+ RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+ (((uintptr_t)_p) |\
+ ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
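+
+/* Usage sketch (illustrative only - the names approximate the Tx status
+ * path elsewhere in the driver):
+ *   info->status.status_driver_data[0] =
+ *           RS_DRV_DATA_PACK(lq_color, reduced_txp);
+ *   ...
+ *   lq_color = RS_DRV_DATA_LQ_COLOR_GET(
+ *           (uintptr_t)info->status.status_driver_data[0]);
+ */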
+
+/* Initialize station's rate scaling information after adding station */
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool init);
+
+/* Notify RS about Tx status */
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info, bool ndp);
+
+/**
+ * iwl_mvm_rate_control_register - Register the rate control callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module. The driver can call
+ * iwl_mvm_rate_control_register in order to register the rate control
+ * callbacks with the mac80211 subsystem. This should be performed prior to
+ * calling ieee80211_register_hw().
+ *
+ */
+int iwl_mvm_rate_control_register(void);
+
+/**
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw(), but before
+ * the driver is unloaded.
+ */
+void iwl_mvm_rate_control_unregister(void);
+
+struct iwl_mvm_sta;
+
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool update);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
new file mode 100644
index 000000000..bdb87d8e9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -0,0 +1,919 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <asm/unaligned.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Copies the phy information into mvm->last_phy_info; it will be used when
+ * the actual data comes from the fw in the next packet.
+ */
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+ mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ spin_lock(&mvm->drv_stats_lock);
+ mvm->drv_rx_stats.ampdu_count++;
+ spin_unlock(&mvm->drv_stats_lock);
+ }
+#endif
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and gives it to mac80211
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len,
+ u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int fraglen;
+
+ /*
+ * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+ * but those are all multiples of 4 long) all goes away, but we
+ * want the *end* of it, which is going to be the start of the IP
+ * header, to be aligned when it gets pulled in.
+ * The beginning of the skb->data is aligned on at least a 4-byte
+ * boundary after allocation. Everything here is aligned at least
+ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+ * the result.
+ */
+ skb_reserve(skb, hdrlen & 3);
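+ /* e.g. a 26 byte QoS data header: 26 & 3 == 2, so reserving 2 bytes
+ * makes the end of the header land on a 4-byte boundary
+ */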
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pull in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+
+ skb_put_data(skb, hdr, hdrlen);
+ fraglen = len - hdrlen;
+
+ if (fraglen) {
+ int offset = (void *)hdr + hdrlen -
+ rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBm. Account for missing antennas by replacing 0
+ * values by S8_MIN: practically no power and a value the 8-bit fw field
+ * cannot report.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ int energy_a, energy_b, energy_c, max_energy;
+ u32 val;
+
+ val =
+ le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+ energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_A_POS;
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_B_POS;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_C_POS;
+ energy_c = energy_c ? -energy_c : S8_MIN;
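+ /* e.g. a fw value of 43 becomes -43 dBm, while a missing antenna
+ * reports 0 and is mapped to S8_MIN so it can never win the max below
+ */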
+ max_energy = max(energy_a, energy_b);
+ max_energy = max(max_energy, energy_c);
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+ energy_a, energy_b, energy_c, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+ rx_status->chain_signal[2] = energy_c;
+}
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: the 802.11 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ * @crypt_len: set to the length of the crypto header on return
+ *
+ * Returns a non-zero value if the packet should be dropped
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats,
+ u32 rx_pkt_status,
+ u8 *crypt_len)
+{
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_NO_ENC)
+ return 0;
+
+ /* packet was encrypted with unknown alg */
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+ return 0;
+
+ switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+ case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+ /* alg is CCM: check MIC only */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+ /* Don't drop the frame and decrypt it in SW */
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
+ /* fall through if TTAK OK */
+
+ case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
+ default:
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr, u32 len,
+ struct iwl_rx_phy_info *phy_info,
+ u32 rate_n_flags)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_tcm_mac *mdata;
+ int mac;
+ int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+ struct iwl_mvm_vif *mvmvif;
+ /* expected throughput in 100Kbps, single stream, 20 MHz */
+ static const u8 thresh_tpt[] = {
+ 9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
+ };
+ u16 thr;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+ mdata = &mvm->tcm.data[mac];
+ mdata->rx.pkts[ac]++;
+
+ /* count the airtime only once for each ampdu */
+ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+ mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+ mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+ }
+
+ if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mdata->opened_rx_ba_sessions ||
+ mdata->uapsd_nonagg_detect.detected ||
+ (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
+ mvmsta->sta_id != mvmvif->ap_sta_id)
+ return;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
+ thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS);
+ } else {
+ if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
+ ARRAY_SIZE(thresh_tpt)))
+ return;
+ thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
+ thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS);
+ }
+
+ thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >>
+ RATE_MCS_CHAN_WIDTH_POS);
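+ /* e.g. VHT MCS5, 2 spatial streams, 40 MHz: 78 * 2 << 1 = 312, i.e.
+ * an expected ~31.2 Mbps in the 100 Kbps units used here
+ */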
+
+ mdata->uapsd_nonagg_detect.rx_bytes += len;
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
+}
+
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u32 status)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+ status & RX_MPDU_RES_STATUS_CSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_info *phy_info;
+ struct iwl_rx_mpdu_res_start *rx_res;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ u32 len;
+ u32 rate_n_flags;
+ u32 rx_pkt_status;
+ u8 crypt_len = 0;
+ bool take_ref;
+
+ phy_info = &mvm->last_phy_info;
+ rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+ len = le16_to_cpu(rx_res->byte_count);
+ rx_pkt_status = get_unaligned_le32((__le32 *)
+ (pkt->data + sizeof(*rx_res) + len));
+
+ /* Don't use dev_alloc_skb(); we'll have enough headroom once the
+ * ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /*
+ * drop the packet if it has failed being decrypted by HW
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+ rx_status->band =
+ (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ rx_status->freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+ rx_status->band);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+ iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
+
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+ (unsigned long long)rx_status->mactime);
+
+ rcu_read_lock();
+ if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
+ u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;
+
+ id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
+
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ if (IS_ERR(sta))
+ sta = NULL;
+ }
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /* This is fine since we prevent two stations with the same
+ * address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
+
+ /* We have tx blocked stations (with CS bit). If we heard
+ * frames from a blocked station on a new channel we can
+ * TX to it again.
+ */
+ if (unlikely(tx_blocked_vif) &&
+ mvmsta->vif == tx_blocked_vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ }
+
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+ if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+ ieee80211_is_beacon(hdr->frame_control)) {
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+ bool trig_check;
+ s32 rssi;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw,
+ FW_DBG_TRIGGER_RSSI);
+ rssi_trig = (void *)trig->data;
+ rssi = le32_to_cpu(rssi_trig->rssi);
+
+ trig_check =
+ iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(mvmsta->vif),
+ trig);
+ if (trig_check && rx_status->signal < rssi)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ NULL);
+ }
+
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+ rate_n_flags);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+ }
+ rcu_read_unlock();
+
+ /* set the preamble flag if appropriate */
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ /*
+ * We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them
+ */
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ }
+
+ /* Set up the HT phy flags */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else {
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ return;
+ }
+ rx_status->rate_idx = rate;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_update_frame_stats(mvm, rate_n_flags,
+ rx_status->flag & RX_FLAG_AMPDU_DETAILS);
+#endif
+
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boot_ns();
+
+ /* Take a reference briefly to kick off a d0i3 entry delay so
+ * we can handle bursts of RX packets without toggling the
+ * state too often. But don't do this for beacons if we are
+ * going to idle because the beacon filtering changes we make
+ * cause the firmware to send us collateral beacons. */
+ take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
+ ieee80211_is_beacon(hdr->frame_control));
+
+ if (take_ref)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
+ crypt_len, rxb);
+
+ if (take_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
+}
+
+struct iwl_mvm_stat_data {
+ struct iwl_mvm *mvm;
+ __le32 flags;
+ __le32 mac_id;
+ u8 beacon_filter_average_energy;
+ void *general;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ int sig = -data->beacon_filter_average_energy;
+ int last_event;
+ int thold = vif->bss_conf.cqm_rssi_thold;
+ int hyst = vif->bss_conf.cqm_rssi_hyst;
+ u16 id = le32_to_cpu(data->mac_id);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* This doesn't need the MAC ID check since it's not taking the
+ * data copied into the "data" struct, but rather the data from
+ * the notification directly.
+ */
+ if (data->general) {
+ u16 vif_id = mvmvif->id;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ struct mvm_statistics_general_cdb *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
+ } else {
+ struct mvm_statistics_general_v8 *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
+ }
+ }
+
+	/* make sure that beacon statistics don't go backwards when the
+	 * TCM requests clearing the statistics
+	 */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->beacon_stats.accu_num_beacons +=
+ mvmvif->beacon_stats.num_beacons;
+
+ if (mvmvif->id != id)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (sig == 0) {
+ IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+ return;
+ }
+
+ mvmvif->bf_data.ave_beacon_signal = sig;
+
+ /* BT Coex */
+ if (mvmvif->bf_data.bt_coex_min_thold !=
+ mvmvif->bf_data.bt_coex_max_thold) {
+ last_event = mvmvif->bf_data.last_bt_coex_event;
+ if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+ (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+ } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+ (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ last_event == 0)) {
+ mvmvif->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+ }
+ }
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ /* CQM Notification */
+ last_event = mvmvif->bf_data.last_cqm_event;
+ if (thold && sig < thold && (last_event == 0 ||
+ sig < last_event - hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ sig,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ sig,
+ GFP_KERNEL);
+ }
+}
+
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_stats *trig_stats;
+ u32 trig_offset, trig_thold;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
+ trig_stats = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+ return;
+
+ trig_offset = le32_to_cpu(trig_stats->stop_offset);
+ trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+ if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_stat_data data = {
+ .mvm = mvm,
+ };
+ int expected_size;
+ int i;
+ u8 *energy;
+ __le32 *bytes;
+ __le32 *air_time;
+ __le32 flags;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ if (iwl_mvm_has_new_rx_api(mvm))
+ expected_size = sizeof(struct iwl_notif_statistics_v11);
+ else
+ expected_size = sizeof(struct iwl_notif_statistics_v10);
+ } else {
+ expected_size = sizeof(struct iwl_notif_statistics_cdb);
+ }
+
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
+ "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
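+		/*
+		 * The fields read here sit at the same offsets in v10 and
+		 * v11, so the v11 struct is safe for both versions; the
+		 * v11-only load_stats fields are only read below when the
+		 * new RX API (which implies v11) is in use.
+		 */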
+ struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.common.beacon_filter_average_energy;
+
+ mvm->rx_stats_v3 = stats->rx;
+
+ mvm->radio_stats.rx_time =
+ le64_to_cpu(stats->general.common.rx_time);
+ mvm->radio_stats.tx_time =
+ le64_to_cpu(stats->general.common.tx_time);
+ mvm->radio_stats.on_time_rf =
+ le64_to_cpu(stats->general.common.on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->general.common.on_time_scan);
+
+ data.general = &stats->general;
+
+ flags = stats->flag;
+ } else {
+ struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.common.beacon_filter_average_energy;
+
+ mvm->rx_stats = stats->rx;
+
+ mvm->radio_stats.rx_time =
+ le64_to_cpu(stats->general.common.rx_time);
+ mvm->radio_stats.tx_time =
+ le64_to_cpu(stats->general.common.tx_time);
+ mvm->radio_stats.on_time_rf =
+ le64_to_cpu(stats->general.common.on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->general.common.on_time_scan);
+
+ data.general = &stats->general;
+
+ flags = stats->flag;
+ }
+ data.flags = flags;
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
+
+ energy = (void *)&v11->load_stats.avg_energy;
+ bytes = (void *)&v11->load_stats.byte_count;
+ air_time = (void *)&v11->load_stats.air_time;
+ } else {
+ struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+
+ energy = (void *)&stats->load_stats.avg_energy;
+ bytes = (void *)&stats->load_stats.byte_count;
+ air_time = (void *)&stats->load_stats.air_time;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = energy[i];
+ }
+ rcu_read_unlock();
+
+ /*
+	 * Don't update in case the statistics were not cleared, since
+	 * we would end up counting the same airtime twice: once in the
+	 * TCM request and once in the statistics notification.
+ */
+ if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
+ return;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 airtime = le32_to_cpu(air_time[i]);
+ u32 rx_bytes = le32_to_cpu(bytes[i]);
+
+ mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+ if (airtime) {
+ /* re-init every time to store rate from FW */
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+ rx_bytes * 8 / airtime);
+ }
+
+ mdata->rx.airtime += airtime;
+ }
+ spin_unlock(&mvm->tcm.lock);
+}
+
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
+}
+
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
+ int i;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ONCE(pkt_len != sizeof(*notif),
+ "Received window status notification of wrong size (%u)\n",
+ pkt_len))
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+ struct ieee80211_sta *sta;
+ u8 sta_id, tid;
+ u64 bitmap;
+ u32 ssn;
+ u16 ratid;
+ u16 received_mpdu;
+
+ ratid = le16_to_cpu(notif->ra_tid[i]);
+ /* check that this TID is valid */
+ if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
+ continue;
+
+ received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+ if (received_mpdu == 0)
+ continue;
+
+ tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+ /* get the station */
+ sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
+ >> BA_WINDOW_STATUS_STA_ID_POS;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+ bitmap = le64_to_cpu(notif->bitmap[i]);
+ ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+ /* update mac80211 with the bitmap for the reordering buffer */
+ ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
+ received_mpdu);
+ }
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
new file mode 100644
index 000000000..9a4848d69
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -0,0 +1,1518 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+
+static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
+ int queue, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+ struct iwl_mvm_key_pn *ptk_pn;
+ int res;
+ u8 tid, keyidx;
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ u8 *extiv;
+
+ /* do PN checking */
+
+	/* multicast and non-data frames arrive only on the default queue */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return 0;
+
+ /* do not check PN for open AP */
+ if (!(stats->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ /*
+ * avoid checking for default queue - we don't want to replicate
+ * all the logic that's necessary for checking the PN on fragmented
+ * frames, leave that to mac80211
+ */
+ if (queue == 0)
+ return 0;
+
+	/* if we got here, this is surely either CCMP or GCMP */
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm,
+ "expected hw-decrypted unicast frame for station\n");
+ return -1;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ keyidx = extiv[3] >> 6;
+
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
+ if (!ptk_pn)
+ return -1;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = 0;
+
+ /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+ if (tid >= IWL_MAX_TID_COUNT)
+ return -1;
+
+	/* load the PN, most significant byte first (the header stores it LSB first) */
+ pn[0] = extiv[7];
+ pn[1] = extiv[6];
+ pn[2] = extiv[5];
+ pn[3] = extiv[4];
+ pn[4] = extiv[1];
+ pn[5] = extiv[0];
+
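+	/*
+	 * A replay has a PN lower than, or equal to, the last one seen;
+	 * an equal PN is only acceptable for later A-MSDU subframes
+	 * (RX_FLAG_ALLOW_SAME_PN).
+	 */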
+ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+ if (res < 0)
+ return -1;
+ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+ return -1;
+
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ stats->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+/* iwl_mvm_create_skb - adds the rxb to a new skb */
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ unsigned int headlen, fraglen, pad_len = 0;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ len -= 2;
+ pad_len = 2;
+ }
+
+ /*
+ * For non-monitor interfaces, strip the bytes the RADA might not
+ * have removed. As a monitor interface cannot coexist with other
+ * interfaces, this removal is safe.
+ */
+ if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
+ u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);
+
+ /*
+ * If RADA was not enabled then decryption was not performed so
+ * the MIC cannot be removed.
+ */
+ if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
+ if (WARN_ON(crypt_len > mic_crc_len))
+ return -EINVAL;
+
+ mic_crc_len -= crypt_len;
+ }
+
+ if (WARN_ON(mic_crc_len > len))
+ return -EINVAL;
+
+ len -= mic_crc_len;
+ }
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ headlen = (len <= skb_tailroom(skb)) ? len :
+ hdrlen + crypt_len + 8;
+
+ /* The firmware may align the packet to DWORD.
+ * The padding is inserted after the IV.
+ * After copying the header + IV skip the padding if
+ * present before copying packet data.
+ */
+ hdrlen += crypt_len;
+
+ if (WARN_ONCE(headlen < hdrlen,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len)) {
+ /*
+ * We warn and trace because we want to be able to see
+ * it in trace-cmd as well.
+ */
+ IWL_DEBUG_RX(mvm,
+ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+ hdrlen, len, crypt_len);
+ return -EINVAL;
+ }
+
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+ fraglen = len - headlen;
+
+ if (fraglen) {
+ int offset = (void *)hdr + headlen + pad_len -
+ rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ return 0;
+}
+
+/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
+ kfree_skb(skb);
+ } else {
+ unsigned int radiotap_len = 0;
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+ __skb_push(skb, radiotap_len);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
+}
+
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
+{
+ int max_energy;
+ u32 rate_flags = rate_n_flags;
+
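+	/*
+	 * The firmware reports each chain's energy as a positive magnitude
+	 * in dBm below zero; 0 appears to mean the chain is unused, so
+	 * report S8_MIN for it.
+	 */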
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains =
+ (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+ rx_status->chain_signal[2] = S8_MIN;
+}
+
+static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats, u16 phy_info,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 pkt_flags, int queue, u8 *crypt_len)
+{
+ u16 status = le16_to_cpu(desc->status);
+
+ /*
+ * Drop UNKNOWN frames in aggregation, unless in monitor mode
+ * (where we don't have the keys).
+ * We limit this to aggregation because in TKIP this is a valid
+ * scenario, since we may not have the (correct) TTAK (phase 1
+ * key) in the firmware.
+ */
+ if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
+ return -1;
+
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ /* TODO: handle packets encrypted with unknown alg */
+
+ switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+ case IWL_RX_MPDU_STATUS_SEC_CCM:
+ case IWL_RX_MPDU_STATUS_SEC_GCM:
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+ /* alg is CCM: check MIC only */
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ if (pkt_flags & FH_RSCSR_RADA_EN)
+ stats->flag |= RX_FLAG_MIC_STRIPPED;
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_TKIP:
+		/* if the TTAK is not OK, don't drop the frame: mac80211 will decrypt it in SW */
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
+ /* fall through if TTAK OK */
+ case IWL_RX_MPDU_STATUS_SEC_WEP:
+ if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_WEP)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
+
+ if (pkt_flags & FH_RSCSR_RADA_EN)
+ stats->flag |= RX_FLAG_ICV_STRIPPED;
+
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ default:
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/*
+ * Returns true if a packet is a duplicate and should be dropped.
+ * Updates the A-MSDU PN tracking info.
+ */
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_rxq_dup_data *dup_data;
+ u8 tid, sub_frame_idx;
+
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ dup_data = &mvm_sta->dup_data[queue];
+
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)) {
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ return false;
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ /* frame has qos control */
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = IWL_MAX_TID_COUNT;
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ dup_data->last_sub_frame[tid] >= sub_frame_idx))
+ return true;
+
+ /* Allow same PN as the first subframe for following sub frames */
+ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ sub_frame_idx > dup_data->last_sub_frame[tid] &&
+ desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
+ dup_data->last_seq[tid] = hdr->seq_ctrl;
+ dup_data->last_sub_frame[tid] = sub_frame_idx;
+
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+ return false;
+}
+
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+ const u8 *data, u32 count)
+{
+ struct iwl_rxq_sync_cmd *cmd;
+ u32 data_size = sizeof(*cmd) + count;
+ int ret;
+
+ /* should be DWORD aligned */
+ if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+ return -EINVAL;
+
+ cmd = kzalloc(data_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->rxq_mask = cpu_to_le32(rxq_mask);
+ cmd->count = cpu_to_le32(count);
+ cmd->flags = 0;
+ memcpy(cmd->payload, data, count);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ TRIGGER_RX_QUEUES_NOTIF_CMD),
+ 0, data_size, cmd);
+
+ kfree(cmd);
+ return ret;
+}
+
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ */
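+/*
+ * For example, with buffer_size 64, iwl_mvm_is_sn_less(1000, 1010, 64)
+ * is true (release up to the NSSN), while iwl_mvm_is_sn_less(1000, 990, 64)
+ * is false, so an NSSN that fell behind the head due to a reorder timeout
+ * is ignored.
+ */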
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+ return ieee80211_sn_less(sn1, sn2) &&
+ !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mvm_baid_data *baid_data,
+ struct iwl_mvm_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
+ u16 ssn = reorder_buf->head_sn;
+
+ lockdep_assert_held(&reorder_buf->lock);
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ goto set_timer;
+
+ while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct sk_buff_head *skb_list = &entries[index].e.frames;
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /*
+		 * Empty the list. It will hold more than one frame for an
+		 * A-MSDU. An empty list is valid as well, since the NSSN
+		 * indicates frames were received.
+ */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+
+set_timer:
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+ while (skb_queue_empty(&entries[index].e.frames))
+ index = (index + 1) % reorder_buf->buf_size;
+ /* modify timer to match next frame's expiration time */
+ mod_timer(&reorder_buf->reorder_timer,
+ entries[index].e.reorder_time + 1 +
+ RX_REORDER_BUF_TIMEOUT_MQ);
+ } else {
+ del_timer(&reorder_buf->reorder_timer);
+ }
+}
+
+void iwl_mvm_reorder_timer_expired(struct timer_list *t)
+{
+ struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
+ struct iwl_mvm_baid_data *baid_data =
+ iwl_mvm_baid_data_from_reorder_buf(buf);
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[buf->queue * baid_data->entries_per_queue];
+ int i;
+ u16 sn = 0, index = 0;
+ bool expired = false;
+ bool cont = false;
+
+ spin_lock(&buf->lock);
+
+ if (!buf->num_stored || buf->removed) {
+ spin_unlock(&buf->lock);
+ return;
+ }
+
+	for (i = 0; i < buf->buf_size; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (skb_queue_empty(&entries[index].e.frames)) {
+ /*
+ * If there is a hole and the next frame didn't expire
+ * we want to break and not advance SN
+ */
+ cont = false;
+ continue;
+ }
+ if (!cont &&
+ !time_after(jiffies, entries[index].e.reorder_time +
+ RX_REORDER_BUF_TIMEOUT_MQ))
+ break;
+
+ expired = true;
+		/* continue until the next hole after these expired frames */
+ cont = true;
+ sn = ieee80211_sn_add(buf->head_sn, i + 1);
+ }
+
+ if (expired) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id = baid_data->sta_id;
+
+ rcu_read_lock();
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* SN is set to the last expired frame + 1 */
+ IWL_DEBUG_HT(buf->mvm,
+ "Releasing expired frames for sta %u, sn %d\n",
+ sta_id, sn);
+ iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
+ sta, baid_data->tid);
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
+ rcu_read_unlock();
+ } else {
+ /*
+ * If no frame expired and there are stored frames, index is now
+ * pointing to the first unexpired frame - modify timer
+ * accordingly to this frame.
+ */
+ mod_timer(&buf->reorder_timer,
+ entries[index].e.reorder_time +
+ 1 + RX_REORDER_BUF_TIMEOUT_MQ);
+ }
+ spin_unlock(&buf->lock);
+}
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+ struct iwl_mvm_delba_data *data)
+{
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ u8 baid = data->baid;
+
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ reorder_buf->buf_size));
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rxq_sync_notification *notif;
+ struct iwl_mvm_internal_rxq_notif *internal_notif;
+
+ notif = (void *)pkt->data;
+ internal_notif = (void *)notif->payload;
+
+ if (internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie) {
+ WARN_ONCE(1, "Received expired RX queue sync message\n");
+ return;
+ }
+
+ switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
+ break;
+ case IWL_MVM_RXQ_NOTIF_DEL_BA:
+ iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
+ }
+
+ if (internal_notif->sync &&
+ !atomic_dec_return(&mvm->queue_sync_counter))
+ wake_up(&mvm->rx_sync_waitq);
+}
+
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be
+ * passed to the upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ int queue,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_baid_data *baid_data;
+ struct iwl_mvm_reorder_buffer *buffer;
+ struct sk_buff *tail;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ bool last_subframe =
+ desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+ u8 tid = ieee80211_get_tid(hdr);
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ struct iwl_mvm_reorder_buf_entry *entries;
+ int index;
+ u16 nssn, sn;
+ u8 baid;
+
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ */
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ /* no sta yet */
+ if (WARN_ONCE(IS_ERR_OR_NULL(sta),
+ "Got valid BAID without a valid station assigned\n"))
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	/* not a data packet or a BAR */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (!baid_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder);
+ return false;
+ }
+
+ if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+ "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+ baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+ tid))
+ return false;
+
+ nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+ spin_lock_bh(&buffer->lock);
+
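+	/*
+	 * The buffer only becomes valid with the first frame of the BA
+	 * session; frames the firmware flagged as predating the session
+	 * (BA_OLD_SN) bypass the reorder buffer entirely.
+	 */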
+ if (!buffer->valid) {
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+ buffer->valid = true;
+ }
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+ goto drop;
+ }
+
+ /*
+	 * If there was a significant jump in the NSSN - adjust.
+	 * If the SN is smaller than the NSSN, it might need to first go into
+	 * the reorder buffer, in which case we just release up to it and the
+	 * rest of the function will take care of storing it and releasing up
+	 * to the NSSN.
+ */
+ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size) ||
+ !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
+ u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
+ min_sn);
+ }
+
+	/* drop any outdated packets */
+ if (ieee80211_sn_less(sn, buffer->head_sn))
+ goto drop;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+ buffer->buf_size) &&
+ (!amsdu || last_subframe))
+ buffer->head_sn = nssn;
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ /*
+ * release immediately if there are no stored frames, and the sn is
+ * equal to the head.
+ * This can happen due to reorder timer, where NSSN is behind head_sn.
+ * When we released everything, and we got the next frame in the
+ * sequence, according to the NSSN we can't release immediately,
+ * while technically there is no hole and we can move forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+	 * Check if we already stored this frame.
+	 * As an A-MSDU is either received in full or not at all, the logic
+	 * is simple: if we have frames in that position in the buffer and
+	 * the last frame that originated from an A-MSDU had a different SN,
+	 * it is a retransmission. If the SN is the same and the subframe
+	 * index is incrementing, it is the same A-MSDU - otherwise it is a
+	 * retransmission.
+ */
+ tail = skb_peek_tail(&entries[index].e.frames);
+ if (tail && !amsdu)
+ goto drop;
+ else if (tail && (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= sub_frame_idx))
+ goto drop;
+
+ /* put in reorder buffer */
+ __skb_queue_tail(&entries[index].e.frames, skb);
+ buffer->num_stored++;
+ entries[index].e.reorder_time = jiffies;
+
+ if (amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = sub_frame_idx;
+ }
+
+ /*
+	 * We cannot trust the NSSN for A-MSDU sub-frames that are not the
+	 * last. The reason is that the NSSN advances on the first sub-frame,
+	 * and may cause the reorder buffer to advance before all the
+	 * sub-frames arrive.
+	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
+	 * A-MSDU with SN 1. The NSSN for the first sub-frame will be 3, so
+	 * the driver releases SN 0, 1 and 2. When sub-frame index 1 then
+	 * arrives, the reorder buffer is already ahead of it and it will be
+	 * dropped.
+	 * If the last sub-frame is not on this queue, we will get a frame
+	 * release notification with an up-to-date NSSN.
+ */
+ if (!amsdu || last_subframe)
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+
+ spin_unlock_bh(&buffer->lock);
+ return true;
+
+drop:
+ kfree_skb(skb);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+}
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+ u32 reorder_data, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mvm_baid_data *data;
+
+ rcu_read_lock();
+
+ data = rcu_dereference(mvm->baid_map[baid]);
+ if (!data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder_data);
+ goto out;
+ }
+
+ if (!data->timeout)
+ goto out;
+
+ timeout = data->timeout;
+ /*
+	 * Do not update last_rx on every frame, to avoid cache bouncing
+	 * between the RX queues.
+	 * Update it once per timeout period; worst case the session will
+	 * expire after ~2 * timeout, which doesn't matter that much.
+ */
+ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+ /* Update is atomic */
+ data->last_rx = now;
+
+out:
+ rcu_read_unlock();
+}
+
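+/*
+ * Reverse the byte order of a MAC address; used to undo the address
+ * reversal the 9000-family hardware applies to de-aggregated A-MSDUs
+ * (see iwl_mvm_rx_mpdu_mq() below).
+ */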
+static void iwl_mvm_flip_address(u8 *addr)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = addr[ETH_ALEN - i - 1];
+ ether_addr_copy(addr, mac_addr);
+}
+
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ struct ieee80211_hdr *hdr;
+ u32 len = le16_to_cpu(desc->mpdu_len);
+ u32 rate_n_flags, gp2_on_air_rise;
+ u16 phy_info = le16_to_cpu(desc->phy_info);
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ u8 crypt_len = 0, channel, energy_a, energy_b;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = 0xffffffff;
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ size_t desc_size;
+
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ channel = desc->v3.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ desc_size = sizeof(*desc);
+ } else {
+ rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ channel = desc->v1.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ desc_size = IWL_RX_DESC_SIZE_V1;
+ }
+
+ hdr = (void *)(pkt->data + desc_size);
+	/* Don't use dev_alloc_skb(); we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+	 */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ /*
+ * If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (rate_n_flags & RATE_MCS_HE_MSK) {
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
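+		/*
+		 * When the TSF overload bit is set, the TSF words of the
+		 * descriptor carry HE PHY data rather than a timestamp (see
+		 * the mactime handling further down).
+		 */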
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known,
+ sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
+ le32_to_cpu(pkt->len_n_flags), queue,
+ &crypt_len)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
+ le16_to_cpu(desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+ /* set the preamble flag if appropriate */
+ if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else if (he_type == RATE_MCS_HE_TYPE_SU) {
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data))
+ he->data3 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ mvm->ampdu_ref++;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+		/* the syms/users value belongs in flags2; its "known" bit is in flags1 */
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+ }
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
+
+	/* update aggregation data, for the monitor's sake, on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ /* toggle is switched whenever new aggregation starts */
+ if (toggle_bit != mvm->ampdu_toggle) {
+ mvm->ampdu_ref++;
+ mvm->ampdu_toggle = toggle_bit;
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |=
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
+ }
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ }
+
+ rcu_read_lock();
+
+ if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
+
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ if (IS_ERR(sta))
+ sta = NULL;
+ }
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /*
+ * This is fine since we prevent two stations with the same
+ * address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
+ u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT);
+
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ /*
+ * We have tx blocked stations (with CS bit). If we heard
+ * frames from a blocked station on a new channel we can
+ * TX to it again.
+ */
+ if (unlikely(tx_blocked_vif) &&
+ tx_blocked_vif == mvmsta->vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ }
+
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+ if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+ ieee80211_is_beacon(hdr->frame_control)) {
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+ bool trig_check;
+ s32 rssi;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw,
+ FW_DBG_TRIGGER_RSSI);
+ rssi_trig = (void *)trig->data;
+ rssi = le32_to_cpu(rssi_trig->rssi);
+
+ trig_check =
+ iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(mvmsta->vif),
+ trig);
+ if (trig_check && rx_status->signal < rssi)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ NULL);
+ }
+
+ if (ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(sta, skb, desc);
+
+ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+		 * Our hardware de-aggregates A-MSDUs but copies the mac header
+		 * as is to the de-aggregated MPDUs. We need to turn off the
+		 * A-MSDU bit in the QoS control ourselves.
+		 * In addition, the HW reverses addr3 and addr4 - reverse them back.
+ */
+ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ if (mvm->trans->cfg->device_family ==
+ IWL_DEVICE_FAMILY_9000) {
+ iwl_mvm_flip_address(hdr->addr3);
+
+ if (ieee80211_has_a4(hdr->frame_control))
+ iwl_mvm_flip_address(hdr->addr4);
+ }
+ }
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+ u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+ iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
+ }
+ }
+
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the he_mu pointer,
+ * i.e. we don't have the phy data (due to the bits
+ * being used for TSF). This shouldn't happen though
+ * as management frames where we need the TSF/timers
+		 * are not transmitted in HE-MU, I think.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
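+		/*
+		 * The RU index encodes both the allocation size and its
+		 * position; subtracting the base of each range yields the
+		 * offset within allocations of that size.
+		 */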
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |=
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+ } else if (he) {
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+ }
+
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (he) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+		case 0:
+		case 1:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			break;
+ case 2:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case 3:
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_SU: {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
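+			/*
+			 * The mapping above encodes the HE-LTF symbol
+			 * count the way radiotap expects: values 0..4
+			 * stand for 1, 2, 4, 6 and 8 LTF symbols, one
+			 * per space-time stream rounded up to the next
+			 * valid count.
+			 */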
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+ break;
+ case RATE_MCS_HE_TYPE_MU: {
+ u16 val;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_phy_data == HE_PHY_DATA_INVAL)
+ break;
+
+ val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data);
+
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ cpu_to_le16(FIELD_PREP(
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+ val));
+ }
+ break;
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ case RATE_MCS_HE_TYPE_TRIG:
+ /* not supported yet */
+ break;
+ }
+ } else {
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+			 "Invalid rate flags 0x%x, band %d\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ goto out;
+ }
+		rx_status->rate_idx = rate;
+	}
+
+ /* management stuff on default queue */
+ if (!queue) {
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all ==
+ SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boot_ns();
+ }
+
+ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_frame_release *release = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+	struct iwl_mvm_baid_data *ba_data;
+	int baid = release->baid;
+
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+ release->baid, le16_to_cpu(release->nssn));
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
+ le16_to_cpu(release->nssn));
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
new file mode 100644
index 000000000..16b614cc1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -0,0 +1,2031 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "fw/api/scan.h"
+#include "iwl-io.h"
+
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
+
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_DWELL_FRAGMENTED 44
+#define IWL_SCAN_DWELL_EXTENDED 90
+#define IWL_SCAN_NUM_OF_FRAGS 3
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+struct iwl_mvm_scan_timing_params {
+ u32 suspend_time;
+ u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+};
+
+struct iwl_mvm_scan_params {
+ /* For CDB this is low band scan type, for non-CDB - type. */
+ enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
+ int n_scan_plans;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ u32 measurement_dwell;
+};
+
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ return (void *)&cmd->v8.data;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
+static inline struct iwl_scan_umac_chan_param *
+iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ return &cmd->v8.channel;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return &cmd->v7.channel;
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ return &cmd->v6.channel;
+
+ return &cmd->v1.channel;
+}
+
+static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
+{
+ if (mvm->scan_rx_ant != ANT_NONE)
+ return mvm->scan_rx_ant;
+ return iwl_mvm_get_valid_rx_ant(mvm);
+}
+
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+ u16 rx_chain;
+ u8 rx_ant;
+
+ rx_ant = iwl_mvm_scan_rx_ant(mvm);
+ rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+ return cpu_to_le16(rx_chain);
+}
+
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
+{
+ if (band == NL80211_BAND_2GHZ)
+ return cpu_to_le32(PHY_BAND_24);
+ else
+ return cpu_to_le32(PHY_BAND_5);
+}
+
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
+ bool no_cck)
+{
+ u32 tx_ant;
+
+ mvm->scan_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+ mvm->scan_last_antenna_idx);
+ tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ if (band == NL80211_BAND_2GHZ && !no_cck)
+ return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
+ tx_ant);
+ else
+ return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
+}
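+
+/*
+ * For example, with both antennas valid, successive scans alternate the
+ * TX antenna between chain A and chain B; 2.4 GHz probes go out at the
+ * 1 Mbps CCK rate unless CCK is disallowed, all others at 6 Mbps OFDM.
+ */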
+
+static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int *global_cnt = data;
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ *global_cnt += 1;
+}
+
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+ return mvm->tcm.result.global_load;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ return mvm->tcm.result.band_load[band];
+}
+
+static enum iwl_mvm_scan_type
+_iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+		       enum iwl_mvm_traffic_load load, bool low_latency)
+{
+ int global_cnt = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_condition_iterator,
+ &global_cnt);
+ if (!global_cnt)
+ return IWL_SCAN_TYPE_UNASSOC;
+
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+ return IWL_SCAN_TYPE_MILD;
+
+ return IWL_SCAN_TYPE_WILD;
+}
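+
+/*
+ * In short: no active interface -> UNASSOC; high load or low latency
+ * (with firmware fragmented-scan support, and not a P2P device) ->
+ * FRAGMENTED; medium load or low latency -> MILD; otherwise WILD.
+ */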
+
+static enum iwl_mvm_scan_type
+iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load(mvm);
+ low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
+static enum iwl_mvm_scan_type
+iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, bool p2p_device,
+			   enum nl80211_band band)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load_band(mvm, band);
+ low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
+static int
+iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
+ struct cfg80211_scan_request *req,
+ struct iwl_mvm_scan_params *params)
+{
+ u32 duration = scan_timing[params->type].max_out_time;
+
+ if (!req->duration)
+ return 0;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ u32 hb_time = scan_timing[params->hb_type].max_out_time;
+
+ duration = min_t(u32, duration, hb_time);
+ }
+
+ if (req->duration_mandatory && req->duration > duration) {
+ IWL_DEBUG_SCAN(mvm,
+ "Measurement scan - too long dwell %hu (max out time %u)\n",
+ req->duration,
+ duration);
+ return -EOPNOTSUPP;
+ }
+
+ return min_t(u32, (u32)req->duration, duration);
+}
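+
+/*
+ * For example, for a MILD scan (max_out_time 120) a requested duration
+ * of 200 is clamped to 120, while the same request with
+ * duration_mandatory set fails with -EOPNOTSUPP.
+ */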
+
+static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
+{
+ /* require rrm scan whenever the fw supports it */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
+}
+
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
+{
+ int max_probe_len;
+
+ max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+ /* we create the 802.11 header and SSID element */
+ max_probe_len -= 24 + 2;
+
+ /* DS parameter set element is added on 2.4GHZ band if required */
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ max_probe_len -= 3;
+
+ return max_probe_len;
+}
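+
+/*
+ * In other words, the room left for IEs is SCAN_OFFLOAD_PROBE_REQ_SIZE
+ * minus 24 (management header) minus 2 (empty SSID element) and, when
+ * RRM is needed, minus another 3 for the DS Parameter Set element.
+ */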
+
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+ int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
+
+ /* TODO: [BUG] This function should return the maximum allowed size of
+ * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
+ * in the same command. So the correct implementation of this function
+ * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
+ * command has only 512 bytes and it would leave us with about 240
+ * bytes for scan IEs, which is clearly not enough. So meanwhile
+ * we will report an incorrect value. This may result in a failure to
+	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
+	 * functions with -ENOBUFS if a large enough probe is provided.
+ */
+ return max_ie_len;
+}
+
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
+}
+
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
+}
+
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
+
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+ bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+
+ /* If this happens, the firmware has mistakenly sent an LMAC
+ * notification during UMAC scans -- warn and ignore it.
+ */
+ if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
+ return;
+
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
+ /* We first check if we were stopping a scan, in which case we
+ * just clear the stopping flag. Then we check if it was a
+ * firmware initiated stop, in which case we need to inform
+ * mac80211.
+ * Note that we can have a stopping and a running scan
+ * simultaneously, but we can't have two different types of
+ * scans stopping or running at the same time (since LMAC
+ * doesn't support it).
+ */
+
+ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
+ ieee80211_scan_completed(mvm->hw, &info);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
+ } else {
+ IWL_ERR(mvm,
+ "got scan complete notification but no scan is running\n");
+ }
+
+ mvm->last_ebs_successful =
+ scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+ scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ int i;
+
+ for (i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ break;
+ if (ssid_list[i].len == ssid_len &&
+		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+ return -1;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+ struct iwl_ssid_ie *ssids,
+ u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+
+ /*
+	 * Copy SSIDs from the match list.
+	 * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs
+	 * to configure the match list.
+ */
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ /* skip empty SSID matchsets */
+ if (!params->match_sets[j].ssid.ssid_len)
+ continue;
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ *ssid_bitmap = 0;
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
+ if (index < 0) {
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ *ssid_bitmap |= BIT(i);
+ } else {
+ *ssid_bitmap |= BIT(index);
+ }
+ }
+}
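+
+/*
+ * For example, with match sets {M0, M1} and scan SSIDs {S0, S1}, the
+ * ssids array ends up as {M1, M0, S1, S0} (the firmware inverts the
+ * order back), and ssid_bitmap carries the bits of the entries taken
+ * from the scan SSID list.
+ */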
+
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .len[1] = sizeof(*profile_cfg),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int blacklist_len;
+ int i;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ return -EIO;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+ blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+ else
+ blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+ blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL);
+ if (!blacklist)
+ return -ENOMEM;
+
+ profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+ if (!profile_cfg) {
+ ret = -ENOMEM;
+ goto free_blacklist;
+ }
+
+ cmd.data[0] = blacklist;
+ cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+ cmd.data[1] = profile_cfg;
+
+ /* No blacklist configuration */
+
+ profile_cfg->num_profiles = req->n_match_sets;
+ profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+ profile->ssid_index = i;
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = 0xff;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(profile_cfg);
+free_blacklist:
+ kfree(blacklist);
+
+ return ret;
+}
+
+static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ return true;
+}
+
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_ABORT_CMD,
+ };
+ u32 status = CAN_ABORT_STATUS;
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+ if (ret)
+ return ret;
+
+ if (status != CAN_ABORT_STATUS) {
+ /*
+ * The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is completed.
+ */
+		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT, status %u\n", status);
+ ret = -ENOENT;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_tx_cmd *tx_cmd,
+ bool no_cck)
+{
+ tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
+ tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+ NL80211_BAND_2GHZ,
+ no_cck);
+ tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+
+ tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
+ tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+ NL80211_BAND_5GHZ,
+ no_cck);
+ tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+}
+
+static void
+iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 ssid_bitmap,
+ struct iwl_scan_req_lmac *cmd)
+{
+ struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].channel_num =
+ cpu_to_le16(channels[i]->hw_value);
+ channel_cfg[i].iter_count = cpu_to_le16(1);
+ channel_cfg[i].iter_interval = 0;
+ channel_cfg[i].flags =
+ cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
+ ssid_bitmap);
+ }
+}
+
+static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ if (!iwl_mvm_rrm_scan_needed(mvm)) {
+ memcpy(newpos, ies, len);
+ return newpos + len;
+ }
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
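+
+/*
+ * The resulting order is: the IEs that must precede DS Parameter Set
+ * (SSID, rates, request, extended rates), then the 3-byte placeholder
+ * element (ID, length 1, channel 0 -- presumably filled in per channel
+ * by the firmware), then the remaining IEs unchanged.
+ */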
+
+#define WFA_TPC_IE_LEN 9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
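+
+/*
+ * The bytes produced are, roughly: dd 07 00 50 f2 <TPC type> 00
+ * <tx power, inserted by the FW> 00, i.e. a Microsoft-OUI vendor
+ * element of length 7.
+ */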
+
+static void
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mvm_scan_params *params)
+{
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+ u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
+
+ /*
+ * Unfortunately, right now the offload scan doesn't support randomising
+ * within the firmware, so until the firmware API is ready we implement
+	 * it in the driver. This means the address is only re-randomised when
+	 * the scan is restarted, not on every iteration, but at least that
+	 * helps a bit.
+ */
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ eth_broadcast_addr(frame->bssid);
+ frame->seq_ctrl = 0;
+
+ pos = frame->u.probe_req.variable;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+ /* Insert ds parameter set element on 2.4 GHz band */
+ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
+ pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
+
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
+
+ memcpy(pos, ies->common_ies, ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+ iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+ } else {
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+ }
+}
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_lmac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
+ cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+}
+
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ struct ieee80211_scan_ies *ies,
+ int n_channels)
+{
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len +
+ ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] <=
+ iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
+
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
+
+ /* We can only use EBS if:
+ * 1. the feature is supported;
+ * 2. the last EBS was successful;
+ * 3. if only single scan, the single scan EBS API is supported;
+ * 4. it's not a p2p find operation.
+ */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
+
+static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
+{
+ return params->n_scan_plans == 1 &&
+ params->scan_plans[0].iterations == 1;
+}
+
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ int flags = 0;
+
+ if (params->n_ssids == 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
+
+ if (params->pass_all)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+
+ if (iwl_mvm_is_regular_scan(params) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ params->type != IWL_SCAN_TYPE_FRAGMENTED)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
+
+ return flags;
+}
+
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq =
+ (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+ u32 ssid_bitmap = 0;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(cmd, 0, ksize(cmd));
+
+ if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
+
+ cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+ cmd->iter_num = cpu_to_le32(1);
+ cmd->n_channels = (u8)params->n_channels;
+
+ cmd->delay = cpu_to_le32(params->delay);
+
+ cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
+ vif));
+
+ cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+ iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
+
+ /* this API uses bits 1-20 instead of 0-19 */
+ ssid_bitmap <<= 1;
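+	/* e.g. a direct-scan SSID at index 0 (bit 0 above) becomes bit 1 */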
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ cmd->schedule[i].delay =
+ cpu_to_le16(scan_plan->interval);
+ cmd->schedule[i].iterations = scan_plan->iterations;
+ cmd->schedule[i].full_scan_mul = 1;
+ }
+
+	/*
+	 * A last scan plan with an iteration count of zero means it should
+	 * run infinitely. The firmware expects an actual count, so the driver
+	 * substitutes the maximum value (0xff). Note this is not always hit:
+	 * a regular scan, for example, uses one plan with a single iteration.
+	 */
+ if (!cmd->schedule[i - 1].iterations)
+ cmd->schedule[i - 1].iterations = 0xff;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif)) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
+ iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
+
+ *preq = params->preq;
+
+ return 0;
+}
+
+static int rate_to_scan_rate_flag(unsigned int rate)
+{
+ static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
+ [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
+ [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
+ [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
+ [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
+ [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
+ [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
+ [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
+ [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
+ [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
+ [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
+ [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
+ [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
+ };
+
+ return rate_to_scan_rate[rate];
+}
+
+static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
+{
+ struct ieee80211_supported_band *band;
+ unsigned int rates = 0;
+ int i;
+
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+
+ /* Set both basic rates and supported rates */
+ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
+
+ return cpu_to_le32(rates);
+}
+
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_dwell *dwell)
+{
+ dwell->active = IWL_SCAN_DWELL_ACTIVE;
+ dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+ dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+ dwell->extended = IWL_SCAN_DWELL_EXTENDED;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
+{
+ struct ieee80211_supported_band *band;
+ int i, j = 0;
+
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
+{
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ struct iwl_scan_config_v1 *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+ cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
+{
+ struct iwl_scan_config *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ enum iwl_mvm_scan_type lb_type, hb_type;
+
+ lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_5GHZ);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].suspend_time);
+
+ cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].max_out_time);
+ cfg->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].suspend_time);
+ } else {
+ enum iwl_mvm_scan_type type =
+ iwl_mvm_get_scan_type(mvm, false);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[type].suspend_time);
+ }
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ void *cfg;
+ int ret, cmd_size;
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ };
+ enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
+ int num_channels =
+ mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+ u32 flags;
+ u8 channel_flags;
+
+ if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_5GHZ);
+ if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
+ return 0;
+ } else {
+ type = iwl_mvm_get_scan_type(mvm, false);
+ if (type == mvm->scan_type)
+ return 0;
+ }
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ cmd_size = sizeof(struct iwl_scan_config);
+ else
+ cmd_size = sizeof(struct iwl_scan_config_v1);
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
+
+ cfg = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ flags = SCAN_CONFIG_FLAG_ACTIVATE |
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+ SCAN_CONFIG_N_CHANNELS(num_channels) |
+ (type == IWL_SCAN_TYPE_FRAGMENTED ?
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+ channel_flags = IWL_CHANNEL_FLAG_EBS |
+ IWL_CHANNEL_FLAG_ACCURATE_EBS |
+ IWL_CHANNEL_FLAG_EBS_ADD |
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ /*
+ * Check for fragmented scan on LMAC2 - high band.
+ * LMAC1 - low band is checked above.
+ */
+ if (iwl_mvm_cdb_scan_api(mvm)) {
+ if (iwl_mvm_is_cdb_supported(mvm))
+ flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+ iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ } else {
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
+ }
+
+ cmd.data[0] = cfg;
+ cmd.len[0] = cmd_size;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (!ret) {
+ mvm->scan_type = type;
+ mvm->hb_scan_type = hb_type;
+ }
+
+ kfree(cfg);
+ return ret;
+}
+
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
+{
+ int i;
+
+ for (i = 0; i < mvm->max_scans; i++)
+ if (mvm->scan_uid_status[i] == status)
+ return i;
+
+ return -ENOENT;
+}
+
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_umac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.adwell_default_n_aps_social =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ cmd->v7.adwell_default_n_aps =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->ssids && params->ssids[0].ssid_len)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ hb_timing = &scan_timing[params->hb_type];
+
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+ }
+
+ if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+ cmd->v7.active_dwell = active_dwell;
+ cmd->v7.passive_dwell = passive_dwell;
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ } else {
+ cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
+ active_dwell;
+ cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
+ passive_dwell;
+ }
+ }
+ } else {
+ cmd->v1.extended_dwell = params->measurement_dwell ?
+ params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = active_dwell;
+ cmd->v1.passive_dwell = passive_dwell;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ hb_timing = &scan_timing[params->hb_type];
+
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+ }
+
+ if (iwl_mvm_cdb_scan_api(mvm)) {
+ cmd->v6.scan_priority =
+ cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ } else {
+ cmd->v1.scan_priority =
+ cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v1.max_out_time =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v1.suspend_time =
+ cpu_to_le32(timing->suspend_time);
+ }
+ }
+
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 ssid_bitmap,
+ struct iwl_scan_channel_cfg_umac *channel_cfg)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].channel_num = channels[i]->hw_value;
+ channel_cfg[i].iter_count = 1;
+ channel_cfg[i].iter_interval = 0;
+ }
+}
+
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u16 flags = 0;
+
+ if (params->n_ssids == 0)
+ flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
+
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
+
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+ if (params->measurement_dwell)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
+
+ /*
+	 * Extended dwell is relevant only for the low band to start with, as
+	 * it is used only for the social channels (1, 6, 11), so even for CDB
+	 * it is enough to check the scan type of the low band.
+ */
+ if (iwl_mvm_is_regular_scan(params) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+ !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
+ !iwl_mvm_is_oce_supported(mvm))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
+
+ if (iwl_mvm_is_oce_supported(mvm)) {
+ if ((params->flags &
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
+		/* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
+		 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
+		 * the same bit, we need to make sure this bit is used here
+		 * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be
+		 * used.
+		 */
+ if ((params->flags &
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
+ !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
+ if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
+ }
+
+ return flags;
+}
+
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_umac_chan_param *chan_param;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
+ struct iwl_scan_req_umac_tail *sec_part = cmd_data +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ int uid, i;
+ u32 ssid_bitmap = 0;
+ u8 channel_flags = 0;
+ u16 gen_flags;
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
+
+ memset(cmd, 0, ksize(cmd));
+
+ iwl_mvm_scan_umac_dwell(mvm, cmd, params);
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->uid = cpu_to_le32(uid);
+ gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
+ cmd->general_flags = cpu_to_le16(gen_flags);
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
+ cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
+ IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
+ cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
+ IWL_SCAN_NUM_OF_FRAGS;
+ }
+
+ cmd->scan_start_mac_id = scan_vif->id;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif))
+ channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ chan_param->flags = channel_flags;
+ chan_param->count = params->n_channels;
+
+ iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap,
+ cmd_data);
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ sec_part->schedule[i].iter_count = scan_plan->iterations;
+ sec_part->schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+	/*
+	 * A last scan plan with an iteration count of zero means it should
+	 * run infinitely. The firmware expects an actual count, so the driver
+	 * substitutes the maximum value (0xff). Note this is not always hit:
+	 * a regular scan, for example, uses one plan with a single iteration.
+	 */
+ if (!sec_part->schedule[i - 1].iter_count)
+ sec_part->schedule[i - 1].iter_count = 0xff;
+
+ sec_part->delay = cpu_to_le16(params->delay);
+ sec_part->preq = params->preq;
+
+ return 0;
+}
+
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+ return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
+
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and the userspace is
+ * trying to run a scan type that is already running, we
+ * return -EBUSY. But if the userspace wants to start a
+ * different type of scan, we stop the opposite type to make
+ * space for the new request. The reason is backwards
+ * compatibility with old wpa_supplicant that wouldn't stop a
+ * scheduled scan before starting a normal scan.
+ */
+
+ if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+ return 0;
+
+	/* Use a switch, even though this is a bitmask, so that more
+	 * than one bit set will fall into the default case and we will warn.
+ */
+ switch (type) {
+ case IWL_MVM_SCAN_REGULAR:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ case IWL_MVM_SCAN_SCHED:
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+ /* For non-unified images, there's no need to stop
+ * anything for net-detect since the firmware is
+ * restarted anyway. This way, any sched scans that
+ * were running will be restarted when we resume.
+ */
+ if (!unified_image)
+ return 0;
+
+ /* If this is a unified image and we ran out of scans,
+ * we need to stop something. Prefer stopping regular
+ * scans, because the results are useless at this
+ * point, and we should be able to keep running
+ * another scheduled scan while suspended.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+ true);
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+ true);
+
+ /* fall through, something is wrong if no scan was
+ * running but we ran out of scans.
+ */
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -EIO;
+}
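+
+/*
+ * For example, once max_scans is reached with a scheduled scan running,
+ * another scheduled scan request gets -EBUSY, while a regular scan
+ * request stops the scheduled scan to make room; the symmetric case
+ * holds when a regular scan is the one running.
+ */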
+
+#define SCAN_TIMEOUT 30000
+
+void iwl_mvm_scan_timeout_wk(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ scan_timeout_dwork);
+
+ IWL_ERR(mvm, "regular scan timed out\n");
+
+ iwl_force_nmi(mvm->trans);
+}
+
+static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ bool p2p)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->type =
+ iwl_mvm_get_scan_type_band(mvm, p2p,
+ NL80211_BAND_2GHZ);
+ params->hb_type =
+ iwl_mvm_get_scan_type_band(mvm, p2p,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->type = iwl_mvm_get_scan_type(mvm, p2p);
+ }
+}
+
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret;
+ struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ if (ret)
+ return ret;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
+
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
+
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+
+ params.scan_plans = &scan_plan;
+ params.n_scan_plans = 1;
+
+ iwl_mvm_fill_scan_type(mvm, &params,
+ vif->type == NL80211_IFTYPE_P2P_DEVICE);
+
+ ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
+ if (ret < 0)
+ return ret;
+
+ params.measurement_dwell = ret;
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+ ret = iwl_mvm_scan_umac(mvm, vif, &params,
+ IWL_MVM_SCAN_REGULAR);
+ } else {
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+ }
+
+ if (ret)
+ return ret;
+
+ iwl_mvm_pause_tcm(mvm, false);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret) {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ iwl_mvm_resume_tcm(mvm);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+ mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
+
+ return 0;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
+
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
+
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+ if (!req->n_scan_plans)
+ return -EINVAL;
+
+ params.n_scan_plans = req->n_scan_plans;
+ params.scan_plans = req->scan_plans;
+
+ iwl_mvm_fill_scan_type(mvm, &params,
+ vif->type == NL80211_IFTYPE_P2P_DEVICE);
+
+ /* In theory, LMAC scans can handle a 32-bit delay, but since
+ * waiting for over 18 hours to start the scan is a bit silly
+ * and to keep it aligned with UMAC scans (which only support
+ * 16-bit delays), trim it down to 16-bits.
+ */
+ if (req->delay > U16_MAX) {
+ IWL_DEBUG_SCAN(mvm,
+ "delay value is > 16-bits, set to max possible\n");
+ params.delay = U16_MAX;
+ } else {
+ params.delay = req->delay;
+ }
+
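+	/* upload the match-set profiles to the firmware before the request */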
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+ ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
+ } else {
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+ }
+
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sched scan request was sent successfully\n");
+ mvm->scan_status |= type;
+ } else {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+ }
+
+ return ret;
+}
+
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+
+ if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
+ return;
+
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ .scan_start_tsf = mvm->scan_start,
+ };
+
+ memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
+ ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_vif = NULL;
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+
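+	/*
+	 * Clear whichever bits were recorded for this UID - either the
+	 * running type or its stopping counterpart.
+	 */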
+ mvm->scan_status &= ~mvm->scan_uid_status[uid];
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+ uid, mvm->scan_uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mvm_ebs_status_str(notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time from last iteration %d\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+ mvm->last_ebs_successful = false;
+
+ mvm->scan_uid_status[uid] = 0;
+}
+
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+
+ mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ mvm->scan_start);
+}
+
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+{
+ struct iwl_umac_scan_abort cmd = {};
+ int uid, ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* We should always get a valid index here, because we already
+ * checked that this type of scan was running in the generic
+ * code.
+ */
+ uid = iwl_mvm_scan_uid_by_status(mvm, type);
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ cmd.uid = cpu_to_le32(uid);
+
+ IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SCAN_ABORT_UMAC,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
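+	/*
+	 * On success, mark the UID as stopping so the completion
+	 * notification is not forwarded to mac80211.
+	 */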
+ if (!ret)
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+ return ret;
+}
+
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ SCAN_OFFLOAD_COMPLETE, };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+
+ IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ ret = iwl_mvm_umac_scan_abort(mvm, type);
+ else
+ ret = iwl_mvm_lmac_scan_abort(mvm);
+
+ if (ret) {
+ IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ }
+
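+	/* wait up to one second for the firmware's completion notification */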
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+
+ return ret;
+}
+
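+/*
+ * Size of the scan command for the firmware's API version; used both to
+ * allocate mvm->scan_cmd and as the host command length.
+ */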
+int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+{
+ int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
+ else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_cdb_scan_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ return base_size +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_req_umac_tail);
+
+ return sizeof(struct iwl_scan_req_lmac) +
+ sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_probe_req);
+}
+
+/*
+ * This function is used in the nic restart flow, to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int uid, i;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+ if (uid >= 0) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_uid_status[uid] = 0;
+ }
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+ if (uid >= 0 && !mvm->fw_restart) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ mvm->scan_uid_status[uid] = 0;
+ }
+
+ /* We shouldn't have any UIDs still set. Loop over all the
+ * UIDs to make sure there's nothing left there and warn if
+ * any is found.
+ */
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
+ }
+ } else {
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
+
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
+ !mvm->fw_restart) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+ }
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+ int ret;
+
+ if (!(mvm->scan_status & type))
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iwl_mvm_scan_stop_wait(mvm, type);
+ if (!ret)
+ mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above.
+ */
+ mvm->scan_status &= ~type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ /* Since the rx handler won't do anything now, we have
+ * to release the scan reference here.
+ */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ if (notify) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000..539b06bf0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ u8 sta_vif_ap_sta_id;
+ enum iwl_sf_state sta_vif_state;
+ int num_active_macs;
+};
+
+/*
+ * Count bound interfaces which are not p2p, besides data->ignore_vif.
+ * If a bound vif of type station exists, its AP station id and SF state
+ * are recorded in the iterator data.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_active_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
+ vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ data->num_active_macs++;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+ if (vif->bss_conf.assoc)
+ data->sta_vif_state = SF_FULL_ON;
+ else
+ data->sta_vif_state = SF_INIT_OFF;
+ }
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+ cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER),
+ cpu_to_le32(SF_BA_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+ struct iwl_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_sta *sta)
+{
+ int i, j, watermark;
+
+ sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+ /*
+ * If we are in association flow - check antenna configuration
+ * capabilities of the AP station, and choose the watermark accordingly.
+ */
+ if (sta) {
+ if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ switch (sta->rx_nss) {
+ case 1:
+ watermark = SF_W_MARK_SISO;
+ break;
+ case 2:
+ watermark = SF_W_MARK_MIMO2;
+ break;
+ default:
+ watermark = SF_W_MARK_MIMO3;
+ break;
+ }
+ } else {
+ watermark = SF_W_MARK_LEGACY;
+ }
+ /* default watermark value for unassociated mode. */
+ } else {
+ watermark = SF_W_MARK_MIMO2;
+ }
+ sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+ for (i = 0; i < SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+
+ if (sta) {
+ BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+ sizeof(sf_full_timeout));
+ } else {
+ BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+ sizeof(sf_full_timeout_def));
+ }
+}
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
+ enum iwl_sf_state new_state)
+{
+ struct iwl_sf_cfg_cmd sf_cmd = {
+ .state = cpu_to_le32(new_state),
+ };
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ if (mvm->cfg->disable_dummy_notification)
+ sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
+
+ /*
+ * If an associated AP sta changed its antenna configuration, the state
+ * will remain FULL_ON but SF parameters need to be reconsidered.
+ */
+ if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+ return 0;
+
+ switch (new_state) {
+ case SF_UNINIT:
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ break;
+ case SF_FULL_ON:
+ if (sta_id == IWL_MVM_INVALID_STA) {
+ IWL_ERR(mvm,
+ "No station: Cannot switch SF to FULL_ON\n");
+ return -EINVAL;
+ }
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
+ rcu_read_unlock();
+ break;
+ case SF_INIT_OFF:
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+ new_state);
+ return -EINVAL;
+ }
+
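+	/* with CMD_ASYNC, a zero return only means the command was queued */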
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ if (!ret)
+ mvm->sf_state = new_state;
+
+ return ret;
+}
+
+/*
+ * Update Smart fifo:
+ * Count bound interfaces that are not to be removed, ignoring p2p devices,
+ * and set new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+ bool remove_vif)
+{
+ enum iwl_sf_state new_state;
+ u8 sta_id = IWL_MVM_INVALID_STA;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct iwl_mvm_active_iface_iterator_data data = {
+ .ignore_vif = changed_vif,
+ .sta_vif_state = SF_UNINIT,
+ .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
+ };
+
+ /*
+ * Ignore the call if we are in HW Restart flow, or if the handled
+ * vif is a p2p device.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+ return 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bound_iface_iterator,
+ &data);
+
+ /* If changed_vif exists and is not to be removed, add to the count */
+ if (changed_vif && !remove_vif)
+ data.num_active_macs++;
+
+ switch (data.num_active_macs) {
+ case 0:
+ /* If there are no active macs - change state to SF_INIT_OFF */
+ new_state = SF_INIT_OFF;
+ break;
+ case 1:
+ if (remove_vif) {
+ /* The one active mac left is of type station
+ * and we filled the relevant data during iteration
+ */
+ new_state = data.sta_vif_state;
+ sta_id = data.sta_vif_ap_sta_id;
+ } else {
+ if (WARN_ON(!changed_vif))
+ return -EINVAL;
+ if (changed_vif->type != NL80211_IFTYPE_STATION) {
+ new_state = SF_UNINIT;
+ } else if (changed_vif->bss_conf.assoc &&
+ changed_vif->bss_conf.dtim_period) {
+ mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+ sta_id = mvmvif->ap_sta_id;
+ new_state = SF_FULL_ON;
+ } else {
+ new_state = SF_INIT_OFF;
+ }
+ }
+ break;
+ default:
+ /* If there are multiple active macs - change to SF_UNINIT */
+ new_state = SF_UNINIT;
+ }
+ return iwl_mvm_sf_config(mvm, sta_id, new_state);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
new file mode 100644
index 000000000..373ace38e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -0,0 +1,3667 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "sta.h"
+#include "rs.h"
+
+/*
+ * The new version of the ADD_STA command added new fields at the end of the
+ * structure, so sending the size of the relevant API's structure is enough to
+ * support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+ if (iwl_mvm_has_new_rx_api(mvm) ||
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return sizeof(struct iwl_mvm_add_sta_cmd);
+ else
+ return sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+ enum nl80211_iftype iftype)
+{
+ int sta_id;
+ u32 reserved_ids = 0;
+
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
+ /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+ for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
+ if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex)))
+ return sta_id;
+ }
+ return IWL_MVM_INVALID_STA;
+}
+
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update, unsigned int flags)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd add_sta_cmd = {
+ .sta_id = mvm_sta->sta_id,
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .add_modify = update ? 1 : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
+ STA_FLG_MIMO_EN_MSK |
+ STA_FLG_RTS_MIMO_PROT),
+ .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
+ };
+ int ret;
+ u32 status;
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ add_sta_cmd.station_type = mvm_sta->sta_type;
+
+ if (!update || (flags & STA_MODIFY_QUEUES)) {
+ memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ add_sta_cmd.tfd_queue_msk =
+ cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ } else {
+ WARN_ON(flags & STA_MODIFY_QUEUES);
+ }
+ }
+
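+	/*
+	 * The cases below intentionally fall through: a station capable of
+	 * a given bandwidth is also capable of all the narrower ones.
+	 */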
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_80:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_40:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_20:
+ if (sta->ht_cap.ht_supported)
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+ break;
+ }
+
+ switch (sta->rx_nss) {
+ case 1:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case 2:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+ break;
+ case 3 ... 8:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+ break;
+ }
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ add_sta_cmd.station_flags_msk |=
+ cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENS_MSK);
+
+ mpdu_dens = sta->ht_cap.ampdu_density;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ agg_size = sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ agg_size >>=
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ } else if (sta->ht_cap.ht_supported) {
+ agg_size = sta->ht_cap.ampdu_factor;
+ }
+
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+ if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
+ add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+
+ if (sta->wme) {
+ add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ add_sta_cmd.uapsd_acs |= BIT(AC_BK);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ add_sta_cmd.uapsd_acs |= BIT(AC_BE);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ add_sta_cmd.uapsd_acs |= BIT(AC_VI);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ add_sta_cmd.uapsd_acs |= BIT(AC_VO);
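+		/* mirror the enabled ACs into both nibbles of the field */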
+ add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
+ add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &add_sta_cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "ADD_STA failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
+{
+ struct iwl_mvm_baid_data *data =
+ from_timer(data, t, session_timer);
+ struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ unsigned long timeout;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (!ba_data->timeout)
+ goto unlock;
+
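+	/*
+	 * Only tear the session down after two full timeout intervals
+	 * without Rx; otherwise re-arm the timer for the remaining time.
+	 */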
+ timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* Timer expired */
+ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+ /*
+ * sta should be valid unless the following happens:
+ * The firmware asserts which triggers a reconfig flow, but
+ * the reconfig fails before we set the pointer to sta into
+ * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+ * A-MPDU and hence the timer continues to run. Then, the
+ * timer expires and sta is NULL.
+ */
+ if (!sta)
+ goto unlock;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+ sta->addr, ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
+
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+ unsigned long disable_agg_tids,
+ bool remove_queue)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 status;
+ u8 sta_id;
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ mvmsta->tid_disable_agg |= disable_agg_tids;
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ if (disable_agg_tids)
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+ if (remove_queue)
+ cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ rcu_read_unlock();
+
+ /* Notify FW of queue removal from the STA queues */
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+
+ return ret;
+}
+
+static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long agg_tids = 0;
+ u8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return -EINVAL;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ agg_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ return agg_tids;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks the queue as free. It DOESN'T delete a BA
+ * agreement, and it doesn't disable the queue.
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long disable_agg_tids = 0;
+ u8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ /* Unmap MAC queues and TIDs from this queue */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ disable_agg_tids |= BIT(tid);
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+
+ mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+ spin_unlock_bh(&mvmsta->lock);
+
+ rcu_read_unlock();
+
+ /*
+ * The TX path may have been using this TXQ_ID from the tid_data,
+ * so make sure it's no longer running so that we can safely reuse
+ * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
+ * above, but nothing guarantees we've stopped using them. Thus,
+ * without this, we could get to iwl_mvm_disable_txq() and remove
+ * the queue while still sending frames to it.
+ */
+ synchronize_net();
+
+ return disable_agg_tids;
+}
+
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+ bool same_sta)
+{
+ struct iwl_mvm_sta *mvmsta;
+ u8 txq_curr_ac, sta_id, tid;
+ unsigned long disable_agg_tids = 0;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid = mvm->queue_info[queue].txq_tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (WARN_ON(!mvmsta))
+ return -EINVAL;
+
+ disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+ /* Disable the queue */
+ if (disable_agg_tids)
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+
+ ret = iwl_mvm_disable_txq(mvm, queue,
+ mvmsta->vif->hw_queue[txq_curr_ac],
+ tid, 0);
+ if (ret) {
+ /* Re-mark the inactive queue as inactive */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm,
+ "Failed to free inactive queue %d (ret=%d)\n",
+ queue, ret);
+
+ return ret;
+ }
+
+ /* If TXQ is allocated to another STA, update removal in FW */
+ if (!same_sta)
+ iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+ return 0;
+}
+
+static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
+ unsigned long tfd_queue_mask, u8 ac)
+{
+ int queue = 0;
+ u8 ac_to_queue[IEEE80211_NUM_ACS];
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+
+ /* See what ACs the existing queues for this STA have */
+ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+ /* Only DATA queues can be shared */
+ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
+ continue;
+
+ /* Don't try and take queues being reconfigured */
+ if (mvm->queue_info[i].status ==
+ IWL_MVM_QUEUE_RECONFIGURING)
+ continue;
+
+ ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+ }
+
+ /*
+ * The queue to share is chosen only from DATA queues as follows (in
+ * descending priority):
+ * 1. An AC_BE queue
+ * 2. Same AC queue
+ * 3. Highest AC queue that is lower than new AC
+ * 4. Any existing AC (there always is at least 1 DATA queue)
+ */
+
+ /* Priority 1: An AC_BE queue */
+ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BE];
+ /* Priority 2: Same AC queue */
+ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[ac];
+ /* Priority 3a: If new AC is VO and VI exists - use VI */
+ else if (ac == IEEE80211_AC_VO &&
+ ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+ /* Priority 3b: No BE so only AC less than the new one is BK */
+ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BK];
+ /* Priority 4a: No BE nor BK - use VI if exists */
+ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+ /* Priority 4b: No BE, BK nor VI - use VO if exists */
+ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VO];
+
+ /* Make sure queue found (or not) is legal */
+ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+ !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
+ IWL_ERR(mvm, "No DATA queues available to share\n");
+ return -ENOSPC;
+ }
+
+ /* Make sure the queue isn't in the middle of being reconfigured */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+ IWL_ERR(mvm,
+ "TXQ %d is in the middle of re-config - try again\n",
+ queue);
+ return -EBUSY;
+ }
+
+ return queue;
+}
+
+/*
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does
+ * that in such a case; otherwise - if no redirection is required - it does
+ * nothing, unless the %force param is true.
+ */
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool shared_queue;
+ unsigned long mq;
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ /*
+ * If the AC is lower than current one - FIFO needs to be redirected to
+ * the lowest one of the streams in the queue. Check if this is needed
+ * here.
+ * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
+ * value 3 and VO with value 0, so to check if ac X is lower than ac Y
+ * we need to check if the numerical value of X is LARGER than of Y.
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "No redirection needed on TXQ #%d\n",
+ queue);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+ mq = mvm->hw_queue_to_mac80211[queue];
+ shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
+ queue, iwl_mvm_ac_to_tx_fifo[ac]);
+
+ /* Stop MAC queues and wait for this queue to empty */
+ iwl_mvm_stop_mac_queues(mvm, mq);
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
+ queue);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Before redirecting the queue we need to de-activate it */
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
+ ret);
+
+ /* Make sure the SCD wrptr is correctly set before reconfiguring */
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+
+ /* Update the TID "owner" of the queue */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+
+ /* Redirect to lower AC */
+ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
+ cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
+
+ /* Update AC marking of the queue */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].mac80211_ac = ac;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Mark the queue as shared in the transport if it is shared.
+ * Note this has to be done after queue enablement because enablement
+ * can also set this value, and there is no indication of shared
+ * queues there.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+out:
+ /* Continue using the MAC queues */
+ iwl_mvm_start_mac_queues(mvm, mq);
+
+ return ret;
+}
+
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac,
+ int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating queue for sta %d on tid %d\n",
+ mvmsta->sta_id, tid);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+ wdg_timeout);
+ if (queue < 0)
+ return queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
+
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac, int tid,
+ struct ieee80211_hdr *hdr)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+ bool using_inactive_queue = false, same_sta = false;
+ unsigned long disable_agg_tids = 0;
+ enum iwl_mvm_agg_state queue_state;
+ bool shared_queue = false, inc_ssn;
+ int ssn;
+ unsigned long tfd_queue_mask;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
+ spin_lock_bh(&mvmsta->lock);
+ tfd_queue_mask = mvmsta->tfd_queue_msk;
+ spin_unlock_bh(&mvmsta->lock);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+ * exists
+ */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+ queue);
+
+ /* If no such queue is found, we'll use a DATA queue instead */
+ }
+
+ if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+ (mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_RESERVED ||
+ mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_INACTIVE)) {
+ queue = mvmsta->reserved_queue;
+ mvm->queue_info[queue].reserved = true;
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+ }
+
+ if (queue < 0)
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+ /*
+ * Check if this queue is already allocated but inactive.
+ * In such a case, we'll need to first free this queue before enabling
+ * it again, so we'll mark it as reserved to make sure no new traffic
+ * arrives on it
+ */
+ if (queue > 0 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+ using_inactive_queue = true;
+ same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+ queue, mvmsta->sta_id, tid);
+ }
+
+ /* No free queue - we'll have to share */
+ if (queue <= 0) {
+ queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+ if (queue > 0) {
+ shared_queue = true;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+ }
+ }
+
+ /*
+ * Mark the TXQ as ready, even though it hasn't been fully configured
+ * yet, to make sure no one else takes it.
+ * This avoids having to re-acquire the lock at the end of the
+ * configuration. On error we'll mark it back as free.
+ */
+ if ((queue > 0) && !shared_queue)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* This shouldn't happen - out of queues */
+ if (WARN_ON(queue <= 0)) {
+ IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+ tid, cfg.sta_id);
+ return queue;
+ }
+
+ /*
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
+ * as aggregatable.
+ * Mark all DATA queues as allowing aggregation at some point.
+ */
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ /*
+ * If this queue was previously inactive (idle) - we need to free it
+ * first
+ */
+ if (using_inactive_queue) {
+ ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+ if (ret)
+ return ret;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating %squeue #%d to sta %d on tid %d\n",
+ shared_queue ? "shared " : "", queue,
+ mvmsta->sta_id, tid);
+
+ if (shared_queue) {
+ /* Disable any open aggs on this queue */
+ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+ if (disable_agg_tids) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+ queue);
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+ }
+ }
+
+ ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
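+	/*
+	 * If the queue had to be enabled at ssn + 1, advance this frame's
+	 * sequence number so it matches what the hardware will send.
+	 */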
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
+
+ /*
+ * Mark the queue as shared in the transport if it is shared.
+ * Note this has to be done after queue enablement because enablement
+ * can also set this value, and there is no indication of shared
+ * queues there.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+ spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+ queue_state = mvmsta->tid_data[tid].state;
+
+ if (mvmsta->reserved_queue == queue)
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (!shared_queue) {
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ /* If we need to re-enable aggregations... */
+ if (queue_state == IWL_AGG_ON) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ goto out_err;
+ }
+ } else {
+ /* Redirect queue, if needed */
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false);
+ if (ret)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+ return ret;
+}
+
+static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ return;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ return IEEE80211_AC_VO; /* MGMT */
+
+ return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff_head deferred_tx;
+ u8 mac_queue;
+ bool no_queue = false; /* Marks if there is a problem with the queue */
+ u8 ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ skb = skb_peek(&tid_data->deferred_tx_frames);
+ if (!skb)
+ return;
+ hdr = (void *)skb->data;
+
+ ac = iwl_mvm_tid_to_ac_queue(tid);
+ mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
+ iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+ IWL_ERR(mvm,
+ "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+ mvmsta->sta_id, tid);
+
+ /*
+ * Mark queue as problematic so later the deferred traffic is
+ * freed, as we can do nothing with it
+ */
+ no_queue = true;
+ }
+
+ __skb_queue_head_init(&deferred_tx);
+
+ /* Disable bottom-halves when entering TX path */
+ local_bh_disable();
+ spin_lock(&mvmsta->lock);
+ skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+ mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
+ spin_unlock(&mvmsta->lock);
+
+ while ((skb = __skb_dequeue(&deferred_tx)))
+ if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+ local_bh_enable();
+
+ /* Wake queue */
+ iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+ add_stream_wk);
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long deferred_tid_traffic;
+ int queue, sta_id, tid;
+
+ /* Check inactivity of queues */
+ iwl_mvm_inactivity_check(mvm);
+
+ mutex_lock(&mvm->mutex);
+
+ /* No queue reconfiguration in TVQM mode */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ goto alloc_queues;
+
+ /* Reconfigure queues requiring reconfiguration */
+ for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
+ bool reconfig;
+ bool change_owner;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ reconfig = (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_RECONFIGURING);
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ */
+ change_owner = !(mvm->queue_info[queue].tid_bitmap &
+ BIT(mvm->queue_info[queue].txq_tid)) &&
+ (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_SHARED);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (reconfig)
+ iwl_mvm_unshare_queue(mvm, queue);
+ else if (change_owner)
+ iwl_mvm_change_queue_owner(mvm, queue);
+ }
+
+alloc_queues:
+ /* Go over all stations with deferred traffic */
+ for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+ IWL_MVM_STATION_COUNT) {
+ clear_bit(sta_id, mvm->sta_deferred_frames);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+ for_each_set_bit(tid, &deferred_tid_traffic,
+ IWL_MAX_TID_COUNT + 1)
+ iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum nl80211_iftype vif_type)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int queue;
+ bool using_inactive_queue = false, same_sta = false;
+
+ /* queue reserving is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return 0;
+
+ /*
+ * Check for inactive queues, so we don't reach a situation where we
+ * can't add a STA due to a queue shortage that doesn't really exist.
+ */
+ iwl_mvm_inactivity_check(mvm);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure we have free resources for this STA */
+ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+ IWL_MVM_QUEUE_FREE))
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+ else
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+ /*
+ * If this queue is already allocated but inactive we'll need to
+ * first free this queue before enabling it again, so we'll mark
+ * it as reserved to make sure no new traffic arrives on it.
+ */
+ using_inactive_queue = true;
+ same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ }
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ mvmsta->reserved_queue = queue;
+
+ if (using_inactive_queue)
+ iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+ queue, mvmsta->sta_id);
+
+ return 0;
+}
+
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ int i;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvm_sta->sta_id,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ /* Make sure reserved queue is still marked as such (if allocated) */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+ int txq_id = tid_data->txq_id;
+ int ac;
+ u8 mac_queue;
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+ ac = tid_to_mac80211_ac[i];
+ mac_queue = mvm_sta->vif->hw_queue[ac];
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d\n",
+ mvm_sta->sta_id, i);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+ mvm_sta->sta_id,
+ i, wdg_timeout);
+ tid_data->txq_id = txq_id;
+
+ /*
+ * Since we don't set the seq number after reset, and HW
+ * sets it now, FW reset will cause the seq num to start
+			 * at 0 again, so the driver needs to update its internal
+			 * copy as well to keep it in sync with the real value.
+ */
+ tid_data->seq_number = 0;
+ } else {
+ u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id ==
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->sta_id, i, txq_id);
+
+ iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+ wdg_timeout);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
+ }
+}
+
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr,
+ u16 mac_id, u16 color)
+{
+ struct iwl_mvm_add_sta_cmd cmd;
+ int ret;
+ u32 status = ADD_STA_SUCCESS;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sta_id = sta->sta_id;
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ cmd.station_type = sta->type;
+
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(0xffff);
+
+ if (addr)
+ memcpy(cmd.addr, addr, ETH_ALEN);
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+ return 0;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+ status);
+ break;
+ }
+ return ret;
+}
+
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_rxq_dup_data *dup_data;
+ int i, ret, sta_id;
+ bool sta_update = false;
+ unsigned int sta_flags = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
+ else
+ sta_id = mvm_sta->sta_id;
+
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return -ENOSPC;
+
+ spin_lock_init(&mvm_sta->lock);
+
+ /* if this is a HW restart re-alloc existing queues */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_int_sta tmp_sta = {
+ .sta_id = sta_id,
+ .type = mvm_sta->sta_type,
+ };
+
+ /*
+ * First add an empty station since allocating
+ * a queue requires a valid station
+ */
+ ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ goto err;
+
+ iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+ sta_update = true;
+ sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
+ goto update_fw;
+ }
+
+ mvm_sta->sta_id = sta_id;
+ mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color);
+ mvm_sta->vif = vif;
+ if (!mvm->trans->cfg->gen2)
+ mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ else
+ mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+ mvm_sta->tx_protection = 0;
+ mvm_sta->tt_tx_protection = false;
+ mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
+
+ /* HW restart, don't assume the memory has been zeroed */
+ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
+ mvm_sta->tfd_queue_msk = 0;
+
+ /* for HW restart - reset everything but the sequence number */
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_sta->tid_data[i].seq_number;
+ memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+ mvm_sta->tid_data[i].seq_number = seq;
+
+ /*
+ * Mark all queues for this STA as unallocated and defer TX
+ * frames until the queue is allocated
+ */
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+ skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
+ }
+ mvm_sta->deferred_traffic_tid_map = 0;
+ mvm_sta->agg_tids = 0;
+
+ if (iwl_mvm_has_new_rx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ int q;
+
+ dup_data = kcalloc(mvm->trans->num_rx_queues,
+ sizeof(*dup_data), GFP_KERNEL);
+ if (!dup_data)
+ return -ENOMEM;
+ /*
+ * Initialize all the last_seq values to 0xffff which can never
+ * compare equal to the frame's seq_ctrl in the check in
+ * iwl_mvm_is_dup() since the lower 4 bits are the fragment
+ * number and fragmented packets don't reach that function.
+ *
+ * This thus allows receiving a packet with seqno 0 and the
+ * retry bit set as the very first packet on a new TID.
+ */
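+		/*
+		 * E.g. (illustrative): a retransmitted first frame with
+		 * seqno 0 has seq_ctrl 0x0000, which can never equal the
+		 * 0xffff sentinel, so it is not dropped as a duplicate.
+		 */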
+ for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ memset(dup_data[q].last_seq, 0xff,
+ sizeof(dup_data[q].last_seq));
+ mvm_sta->dup_data = dup_data;
+ }
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+ ieee80211_vif_type_p2p(vif));
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * if rs is registered with mac80211, then "add station" will be handled
+ * via the corresponding ops, otherwise need to notify rate scaling here
+ */
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
+update_fw:
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
+ if (ret)
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (!sta->tdls) {
+ WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
+ mvmvif->ap_sta_id = sta_id;
+ } else {
+ WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
+ }
+ }
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+ return 0;
+
+err:
+ return ret;
+}
+
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
+ mvmsta->sta_id);
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+ mvmsta->sta_id);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station validate that the station is indeed known to the driver (sanity
+ * only).
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+ .sta_id = sta_id,
+ };
+ int ret;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* Note: internal stations are marked as error values */
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
+ sizeof(rm_sta_cmd), &rm_sta_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int ac;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ ac = iwl_mvm_tid_to_ac_queue(i);
+ iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+ vif->hw_queue[ac], i, 0);
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+}
+
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ u16 txq_id;
+ int ret;
+
+ spin_lock_bh(&mvm_sta->lock);
+ txq_id = mvm_sta->tid_data[i].txq_id;
+ spin_unlock_bh(&mvm_sta->lock);
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ u8 sta_id = mvm_sta->sta_id;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_rx_api(mvm))
+ kfree(mvm_sta->dup_data);
+
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (ret)
+ return ret;
+
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ if (ret)
+ return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+ } else {
+ u32 q_mask = mvm_sta->tfd_queue_msk;
+
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ q_mask);
+ }
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+
+ iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+ /* If there is a TXQ still marked as reserved - free it */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ sta_id, reserved_txq, *status)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
+ }
+
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == sta_id) {
+ /* if associated - we can't remove the AP STA now */
+ if (vif->bss_conf.assoc)
+ return ret;
+
+ /* unassoc - go ahead - remove the AP STA now */
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+ }
+
+ /*
+ * This shouldn't happen - the TDLS channel switch should be canceled
+ * before the STA is removed.
+ */
+ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+ cancel_delayed_work(&mvm->tdls_cs.dwork);
+ }
+
+ /*
+ * Make sure that the tx response code sees the station as -EBUSY and
+ * calls the drain worker.
+ */
+ spin_lock_bh(&mvm_sta->lock);
+ spin_unlock_bh(&mvm_sta->lock);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+
+ return ret;
+}
+
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id)
+{
+ int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+ return ret;
+}
+
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type)
+{
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ sta->sta_id == IWL_MVM_INVALID_STA) {
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
+ if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
+ return -ENOSPC;
+ }
+
+ sta->tfd_queue_msk = qmask;
+ sta->type = type;
+
+ /* put a non-NULL value so iterating over the stations won't stop */
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+ return 0;
+}
+
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+ sta->sta_id = IWL_MVM_INVALID_STA;
+}
+
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+ u8 sta_id, u8 fifo)
+{
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int tvqm_queue =
+ iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+ *queue = tvqm_queue;
+ } else {
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .sta_id = sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
+ }
+}
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Allocate aux station and assign to it the aux queue */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_AUX_ACTIVITY);
+ if (ret)
+ return ret;
+
+ /* Map Aux queue to fifo - needs to happen before adding Aux station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MVM_TX_FIFO_MCAST);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+ MAC_INDEX_AUX, 0);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+ return ret;
+ }
+
+ /*
+	 * For 22000 firmware and on we cannot add a queue to a station
+	 * unknown to the firmware, so enable the queue here - after the
+	 * station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MVM_TX_FIFO_MCAST);
+
+ return 0;
+}
+
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Map snif queue to fifo - must happen before adding snif station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+ mvm->snif_sta.sta_id,
+ IWL_MVM_TX_FIFO_BE);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
+ mvmvif->id, 0);
+ if (ret)
+ return ret;
+
+ /*
+	 * For 22000 firmware and on we cannot add a queue to a station
+	 * unknown to the firmware, so enable the queue here - after the
+	 * station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+ mvm->snif_sta.sta_id,
+ IWL_MVM_TX_FIFO_BE);
+
+ return 0;
+}
+
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+ IWL_MAX_TID_COUNT, 0);
+ ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+{
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
+}
+
+void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ const u8 *baddr = _baddr;
+ int queue;
+ int ret;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ queue = mvm->probe_queue;
+ else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ queue = mvm->p2p_dev_queue;
+ else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
+ return -EINVAL;
+
+ bsta->tfd_queue_msk |= BIT(queue);
+
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+ &cfg, wdg_timeout);
+ }
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = vif->bss_conf.bssid;
+
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
+ return -ENOSPC;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ return ret;
+
+ /*
+	 * For 22000 firmware and on we cannot add a queue to a station
+	 * unknown to the firmware, so enable the queue here - after the
+	 * station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+ bsta->sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ mvm->probe_queue = queue;
+ else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_dev_queue = queue;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int queue;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ queue = mvm->probe_queue;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ queue = mvm->p2p_dev_queue;
+ break;
+ default:
+ WARN(1, "Can't free bcast queue on vif type %d\n",
+ vif->type);
+ return;
+ }
+
+ iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+ return ret;
+}
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
+ ieee80211_vif_type_p2p(vif),
+ IWL_STA_GENERAL_PURPOSE);
+}
+
+/* Allocate a new station entry for the broadcast station on the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+
+ return ret;
+}
+
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
+
+ iwl_mvm_dealloc_bcast_sta(mvm, vif);
+
+ return ret;
+}
+
+/*
+ * Allocate a new station entry for the multicast station on the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = msta->sta_id,
+ .tid = 0,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -ENOTSUPP;
+
+ /*
+ * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+ * invalid, so make sure we use the queue we want.
+ * Note that this is done here as we want to avoid making DQA
+ * changes in mac80211 layer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
+ vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ mvmvif->cab_queue = vif->cab_queue;
+ }
+
+ /*
+ * While in previous FWs we had to exclude cab queue from TFD queue
+	 * mask, now it is needed like any other queue.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+ msta->tfd_queue_msk |= BIT(vif->cab_queue);
+ }
+ ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+ mvmvif->id, mvmvif->color);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
+ }
+
+ /*
+ * Enable cab queue after the ADD_STA command is sent.
+ * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
+ * command with unknown station id, and for FW that doesn't support
+ * station API since the cab queue is not included in the
+ * tfd_queue_mask.
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+ msta->sta_id,
+ 0,
+ timeout);
+ mvmvif->cab_queue = queue;
+ } else if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+
+ return 0;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+
+ iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+ 0, 0);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
+#define IWL_MAX_RX_BA_SESSIONS 16
+
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+ struct iwl_mvm_delba_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+ .metadata.sync = 1,
+ .delba.baid = baid,
+ };
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+}
+
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data)
+{
+ int i;
+
+ iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ int j;
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ if (likely(!reorder_buf->num_stored)) {
+ spin_unlock_bh(&reorder_buf->lock);
+ continue;
+ }
+
+ /*
+ * This shouldn't happen in regular DELBA since the internal
+ * delBA notification should trigger a release of all frames in
+ * the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_purge(&entries[j].e.frames);
+ /*
+		 * Prevent timer re-arm. This prevents a very far-fetched case
+ * where we timed out on the notification. There may be prior
+ * RX frames pending in the RX queue before the notification
+ * that might get processed between now and the actual deletion
+ * and we would re-arm the timer although we are deleting the
+ * reorder buffer.
+ */
+ reorder_buf->removed = true;
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+ }
+}
+
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data,
+ u16 ssn, u16 buf_size)
+{
+ int i;
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+ int j;
+
+ reorder_buf->num_stored = 0;
+ reorder_buf->head_sn = ssn;
+ reorder_buf->buf_size = buf_size;
+ /* rx reorder timer */
+ timer_setup(&reorder_buf->reorder_timer,
+ iwl_mvm_reorder_timer_expired, 0);
+ spin_lock_init(&reorder_buf->lock);
+ reorder_buf->mvm = mvm;
+ reorder_buf->queue = i;
+ reorder_buf->valid = false;
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_head_init(&entries[j].e.frames);
+ }
+}
+
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_baid_data *baid_data = NULL;
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+		IWL_WARN(mvm, "Not enough RX BA sessions\n");
+ return -ENOSPC;
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /*
+ * The division below will be OK if either the cache line size
+		 * can be divided by the entry size (ALIGN will round up) or
+		 * if the entry size can be divided by the cache line size, in
+ * which case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /*
+ * Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+ /*
+ * Allocate here so if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mvm->trans->num_rx_queues *
+ reorder_buf_size,
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+
+ /*
+ * This division is why we need the above BUILD_BUG_ON(),
+ * if that doesn't hold then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
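+		/*
+		 * Worked example with assumed sizes (illustrative only):
+		 * with 16-byte entries, 64-byte cache lines and buf_size = 10,
+		 * 10 * 16 = 160 bytes is ALIGN()ed up to 192 (3 cache lines),
+		 * giving entries_per_queue = 192 / 16 = 12.
+		 */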
+ }
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ if (start) {
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
+ } else {
+ cmd.remove_immediate_ba_tid = (u8) tid;
+ }
+ cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
+ STA_MODIFY_REMOVE_BA_TID;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ goto out_free;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ break;
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ ret = -ENOSPC;
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ if (ret)
+ goto out_free;
+
+ if (start) {
+ u8 baid;
+
+ mvm->rx_ba_sessions++;
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
+ IWL_ADD_STA_BAID_SHIFT);
+ baid_data->baid = baid;
+ baid_data->timeout = timeout;
+ baid_data->last_rx = jiffies;
+ baid_data->rcu_ptr = &mvm->baid_map[baid];
+ timer_setup(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired, 0);
+ baid_data->mvm = mvm;
+ baid_data->tid = tid;
+ baid_data->sta_id = mvm_sta->sta_id;
+
+ mvm_sta->tid_to_baid[tid] = baid;
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
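+		/*
+		 * Illustrative arithmetic: a BA timeout of 100 TUs arms the
+		 * session timer for 200 TUs, i.e. 200 * 1024 usec ~= 205 ms.
+		 */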
+
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+ /*
+ * protect the BA data with RCU to cover a case where our
+ * internal RX sync mechanism will timeout (not that it's
+ * supposed to happen) and we will free the session data while
+ * RX is being processed in parallel
+ */
+ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
+ mvm_sta->sta_id, tid, baid);
+ WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+ rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+ } else {
+ u8 baid = mvm_sta->tid_to_baid[tid];
+
+ if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return -EINVAL;
+
+ baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ /* synchronize all rx queues so we can safely delete */
+ iwl_mvm_free_reorder(mvm, baid_data);
+ del_timer_sync(&baid_data->session_timer);
+ RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
+ IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+ }
+ return 0;
+
+out_free:
+ kfree(baid_data);
+ return ret;
+}
+
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start) {
+ mvm_sta->tfd_queue_msk |= BIT(queue);
+ mvm_sta->tid_disable_agg &= ~BIT(tid);
+ } else {
+ /* In DQA-mode the queue isn't removed on agg termination */
+ mvm_sta->tid_disable_agg |= BIT(tid);
+ }
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
+};
+
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data;
+ u16 normalized_ssn;
+ u16 txq_id;
+ int ret;
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+ mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm,
+ "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
+ mvmsta->tid_data[tid].state);
+ return -ENXIO;
+ }
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
+ iwl_mvm_has_new_tx_api(mvm)) {
+ u8 ac = tid_to_mac80211_ac[tid];
+
+ ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ /* possible race condition - we entered D0i3 while starting agg */
+ if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+ spin_unlock_bh(&mvmsta->lock);
+ IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+ return -EIO;
+ }
+
+ spin_lock(&mvm->queue_info_lock);
+
+ /*
+ * Note the possible cases:
+ * 1. An enabled TXQ - TXQ needs to become agg'ed
+ * 2. The TXQ hasn't yet been enabled, so find a free one and mark
+ * it as reserved
+ */
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+ ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (ret < 0) {
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ goto release_locks;
+ }
+
+ txq_id = ret;
+
+ /* TXQ hasn't yet been enabled, so mark it only as reserved */
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
+		ret = -ENXIO;
+		IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
+			txq_id, IWL_MAX_HW_QUEUES - 1);
+		/* must also release queue_info_lock taken above */
+		goto release_locks;
+
+ } else if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can't start tid %d agg on shared queue!\n",
+ tid);
+ goto release_locks;
+ }
+
+ spin_unlock(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "AGG for tid %d will be on queue #%d\n",
+ tid, txq_id);
+
+ tid_data = &mvmsta->tid_data[tid];
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+ tid_data->txq_id = txq_id;
+ *ssn = tid_data->ssn;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->ssn,
+ tid_data->next_reclaimed);
+
+ /*
+	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->cfg->gen2)
+ normalized_ssn &= 0xff;
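+	/*
+	 * E.g. (illustrative): after 256 frames, ssn may be 0x100 while
+	 * the 8-bit next_reclaimed has wrapped to 0x00; masking makes the
+	 * comparison below come out equal, as it should.
+	 */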
+
+ if (normalized_ssn == tid_data->next_reclaimed) {
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ ret = 0;
+ goto out;
+
+release_locks:
+ spin_unlock(&mvm->queue_info_lock);
+out:
+ spin_unlock_bh(&mvmsta->lock);
+
+ return ret;
+}
+
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+ bool amsdu)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
+ int queue, ret;
+ bool alloc_queue = true;
+ enum iwl_mvm_queue_status queue_status;
+ u16 ssn;
+
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = buf_size,
+ .aggregate = true,
+ };
+
+ /*
+ * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+ * manager, so this function should never be called in this case.
+ */
+ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
+ BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
+ != IWL_MAX_TID_COUNT);
+
+ spin_lock_bh(&mvmsta->lock);
+ ssn = tid_data->ssn;
+ queue = tid_data->txq_id;
+ tid_data->state = IWL_AGG_ON;
+ mvmsta->agg_tids |= BIT(tid);
+ tid_data->ssn = 0xffff;
+ tid_data->amsdu_in_ampdu_allowed = amsdu;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ /*
+ * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
+ * would have failed, so if we are here there is no need to
+ * allocate a queue.
+ * However, if aggregation size is different than the default
+ * size, the scheduler should be reconfigured.
+		 * We cannot do this with the new TX API, so return unsupported
+		 * for now, until it is offloaded to firmware.
+ * Note that if SCD default value changes - this condition
+ * should be updated as well.
+ */
+ if (buf_size < IWL_FRAME_LIMIT)
+ return -ENOTSUPP;
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ goto out;
+ }
+
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ queue_status = mvm->queue_info[queue].status;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Maybe there is no need to even alloc a queue... Use the status
+	 * snapshotted under queue_info_lock above; re-reading it here
+	 * without the lock would be racy.
+	 */
+	if (queue_status == IWL_MVM_QUEUE_READY)
+ alloc_queue = false;
+
+ /*
+ * Only reconfig the SCD for the queue if the window size has
+ * changed from current (become smaller)
+ */
+ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
+ /*
+ * If reconfiguring an existing queue, it first must be
+ * drained
+ */
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error draining queue before reconfig\n");
+ return ret;
+ }
+
+ ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+ mvmsta->sta_id, tid,
+ buf_size, ssn);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error reconfiguring TXQ #%d\n", queue);
+ return ret;
+ }
+ }
+
+ if (alloc_queue)
+ iwl_mvm_enable_txq(mvm, queue,
+ vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+ &cfg, wdg_timeout);
+
+ /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+ if (queue_status != IWL_MVM_QUEUE_SHARED) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ }
+
+ /* No need to mark as reserved */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+out:
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ mvmsta->max_agg_bufsize =
+ min(mvmsta->max_agg_bufsize, buf_size);
+ mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
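+	/*
+	 * E.g. (illustrative): sessions negotiated with buf_size 64 and
+	 * then 16 leave max_agg_bufsize at 16, the minimum across all of
+	 * the station's aggregation sessions and the default.
+	 */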
+
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+}
+
+static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct iwl_mvm_tid_data *tid_data)
+{
+ u16 txq_id = tid_data->txq_id;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ /*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+ * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+ tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ int err;
+
+ /*
+ * If mac80211 is cleaning its state, then say that we finished since
+ * our state has been cleared anyway.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ txq_id = tid_data->txq_id;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->state);
+
+ mvmsta->agg_tids &= ~BIT(tid);
+
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+ switch (tid_data->state) {
+ case IWL_AGG_ON:
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "ssn = %d, next_recl = %d\n",
+ tid_data->ssn, tid_data->next_reclaimed);
+
+ tid_data->ssn = 0xffff;
+ tid_data->state = IWL_AGG_OFF;
+ spin_unlock_bh(&mvmsta->lock);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+ return 0;
+ case IWL_AGG_STARTING:
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * The agg session has been stopped before it was set up. This
+ * can happen when the AddBA timer times out for example.
+ */
+
+ /* No barriers since we are under mutex */
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ tid_data->state = IWL_AGG_OFF;
+ err = 0;
+ break;
+ default:
+ IWL_ERR(mvm,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ mvmsta->sta_id, tid, tid_data->state);
+ IWL_ERR(mvm,
+ "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+ err = -EINVAL;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return err;
+}
+
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ enum iwl_mvm_agg_state old_state;
+
+ /*
+ * First set the agg state to OFF to avoid calling
+ * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+ */
+ spin_lock_bh(&mvmsta->lock);
+ txq_id = tid_data->txq_id;
+ IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->state);
+ old_state = tid_data->state;
+ tid_data->state = IWL_AGG_OFF;
+ mvmsta->agg_tids &= ~BIT(tid);
+ spin_unlock_bh(&mvmsta->lock);
+
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+ if (old_state >= IWL_AGG_ON) {
+ iwl_mvm_drain_sta(mvm, mvmsta, true);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
+ BIT(tid), 0))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ } else {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+ }
+
+ iwl_mvm_drain_sta(mvm, mvmsta, false);
+
+ iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+ int i, max = -1, max_offs = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Pick the unused key offset with the highest 'deleted'
+ * counter. Every time a key is deleted, all the counters
+ * are incremented and the one that was just deleted is
+ * reset to zero. Thus, the highest counter is the one
+ * that was deleted longest ago. Pick that one.
+ */
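+	/*
+	 * Example (illustrative): with fw_key_deleted = {3, 0, 7, 1} and
+	 * offset 0 in use, offset 2 (counter 7) was deleted longest ago
+	 * among the unused slots and is returned.
+	 */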
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (test_bit(i, mvm->fw_key_table))
+ continue;
+ if (mvm->fw_key_deleted[i] > max) {
+ max = mvm->fw_key_deleted[i];
+ max_offs = i;
+ }
+ }
+
+ if (max_offs < 0)
+ return STA_KEY_IDX_INVALID;
+
+ return max_offs;
+}
+
+static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (sta)
+ return iwl_mvm_sta_from_mac80211(sta);
+
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+ * station ID, then use AP's station ID.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+ u8 sta_id = mvmvif->ap_sta_id;
+
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /*
+ * It is possible that the 'sta' parameter is NULL,
+ * for example when a GTK is removed - the sta_id will then
+ * be the AP ID, and no station was passed by mac80211.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+ }
+
+ return NULL;
+}
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+ u32 sta_id,
+ struct ieee80211_key_conf *key, bool mcast,
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+ u8 key_offset, bool mfp)
+{
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ __le16 key_flags;
+ int ret;
+ u32 status;
+ u16 keyidx;
+ u64 pn = 0;
+ int i, size;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return -EINVAL;
+
+ keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK;
+ key_flags = cpu_to_le16(keyidx);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+ if (new_api) {
+ memcpy((void *)&u.cmd.tx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ memcpy((void *)&u.cmd.rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+ pn = atomic64_read(&key->tx_pn);
+
+ } else {
+ u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ u.cmd_v1.tkip_rx_ttak[i] =
+ cpu_to_le16(tkip_p1k[i]);
+ }
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+ memcpy(u.cmd.common.key + 3, key->key, key->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+ /* fall through */
+ case WLAN_CIPHER_SUITE_GCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
+ break;
+ default:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ }
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+ if (mfp)
+ key_flags |= cpu_to_le16(STA_KEY_MFP);
+
+ u.cmd.common.key_offset = key_offset;
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.sta_id = sta_id;
+
+ if (new_api) {
+ u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+ size = sizeof(u.cmd);
+ } else {
+ size = sizeof(u.cmd_v1);
+ }
+
+ status = ADD_STA_SUCCESS;
+ if (cmd_flags & CMD_ASYNC)
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+ &u.cmd);
+ else
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+ &u.cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, bool remove_key)
+{
+ struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+ /* verify the key details match the required command's expectations */
+ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+ (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+ return -EINVAL;
+
+ if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
+ return -EINVAL;
+
+ igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+ igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+ if (remove_key) {
+ /* This is a valid situation for IGTK */
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return 0;
+
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+ } else {
+ struct ieee80211_key_seq seq;
+ const u8 *pn;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ igtk_cmd.ctrl_flags |=
+ cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ pn = seq.aes_cmac.pn;
+ igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+ ((u64) pn[4] << 8) |
+ ((u64) pn[3] << 16) |
+ ((u64) pn[2] << 24) |
+ ((u64) pn[1] << 32) |
+ ((u64) pn[0] << 40));
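+		/*
+		 * Byte-order example (illustrative): pn = {1, 2, 3, 4, 5, 6}
+		 * yields receive_seq_cnt 0x010203040506 - pn[0] is the most
+		 * significant byte of the 48-bit packet number.
+		 */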
+ }
+
+ IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+ remove_key ? "removing" : "installing",
+ igtk_cmd.sta_id);
+
+ if (!iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+ .ctrl_flags = igtk_cmd.ctrl_flags,
+ .key_id = igtk_cmd.key_id,
+ .sta_id = igtk_cmd.sta_id,
+ .receive_seq_cnt = igtk_cmd.receive_seq_cnt
+ };
+
+ memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+ ARRAY_SIZE(igtk_cmd_v1.igtk));
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+ }
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd), &igtk_cmd);
+}
+
+
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (sta)
+ return sta->addr;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+ u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ return sta->addr;
+ }
+
+ return NULL;
+}
+
+static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset,
+ bool mcast)
+{
+ int ret;
+ const u8 *addr;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+ u32 sta_id;
+ bool mfp = false;
+
+ if (sta) {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ sta_id = mvm_sta->sta_id;
+ mfp = sta->mfp;
+ } else if (vif->type == NL80211_IFTYPE_AP &&
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->mcast_sta.sta_id;
+ } else {
+ IWL_ERR(mvm, "Failed to find station id\n");
+ return -EINVAL;
+ }
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+ seq.tkip.iv32, p1k, 0, key_offset,
+ mfp);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+ 0, NULL, 0, key_offset, mfp);
+ break;
+ default:
+ ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+ 0, NULL, 0, key_offset, mfp);
+ }
+
+ return ret;
+}
+
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+ __le16 key_flags;
+ int ret, size;
+ u32 status;
+
+ /* This is a valid situation for GTK removal */
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return 0;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ /*
+ * The fields assigned here are in the same location at the start
+ * of the command, so we can do this union trick.
+ */
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
+ u.cmd.common.sta_id = sta_id;
+
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+ &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset)
+{
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = IWL_MVM_INVALID_STA;
+ int ret;
+ static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type != NL80211_IFTYPE_AP ||
+ keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ /* Get the station id from the mvm local station table */
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (!mvm_sta) {
+ IWL_ERR(mvm, "Failed to find station\n");
+ return -EINVAL;
+ }
+ sta_id = mvm_sta->sta_id;
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station
+ * table.
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
+ return -EINVAL;
+ } else {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->mcast_sta.sta_id;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+ goto end;
+ }
+
+ /* If the key_offset is not pre-assigned, we need to find a
+ * new offset to use. In normal cases, the offset is not
+ * pre-assigned, but during HW_RESTART we want to reuse the
+ * same indices, so we pass them when this function is called.
+ *
+	 * In D3 entry, we need to hardcode the indices (because the
+ * firmware hardcodes the PTK offset to 0). In this case, we
+ * need to make sure we don't overwrite the hw_key_idx in the
+ * keyconf structure, because otherwise we cannot configure
+ * the original ones back when resuming.
+ */
+ if (key_offset == STA_KEY_IDX_INVALID) {
+ key_offset = iwl_mvm_set_fw_key_idx(mvm);
+ if (key_offset == STA_KEY_IDX_INVALID)
+ return -ENOSPC;
+ keyconf->hw_key_idx = key_offset;
+ }
+
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
+ if (ret)
+ goto end;
+
+ /*
+ * For WEP, the same key is used for multicast and unicast. Upload it
+ * again, using the same key offset, and now pointing the other one
+ * to the same key slot (offset).
+ * If this fails, remove the original as well.
+ */
+ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ sta) {
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+ key_offset, !mcast);
+ if (ret) {
+ __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ goto end;
+ }
+ }
+
+ __set_bit(key_offset, mvm->fw_key_table);
+
+end:
+ IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta ? sta->addr : zero_addr, ret);
+ return ret;
+}
+
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = IWL_MVM_INVALID_STA;
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station from the mvm local station table */
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (mvm_sta)
+ sta_id = mvm_sta->sta_id;
+ else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
+ sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
+
+ IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+ if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+ IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+ keyconf->hw_key_idx);
+ return -ENOENT;
+ }
+
+ /* track which key was deleted last */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (mvm->fw_key_deleted[i] < U8_MAX)
+ mvm->fw_key_deleted[i]++;
+ }
+ mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
+ if (sta && !mvm_sta) {
+ IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+ return 0;
+ }
+
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ if (ret)
+ return ret;
+
+ /* delete WEP key twice to get rid of (now useless) offset */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
+
+ return ret;
+}
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ bool mfp = sta ? sta->mfp : false;
+
+ rcu_read_lock();
+
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (WARN_ON_ONCE(!mvm_sta))
+ goto unlock;
+ iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
+ iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
+ mfp);
+
+ unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+ .station_flags_msk = cpu_to_le32(STA_FLG_PS),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt, u16 tids, bool more_data,
+ bool single_sta_queue)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int tid, ret;
+ unsigned long _tids = tids;
+
+ /* convert TIDs to ACs - we don't support TSPEC so that's OK
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation or dqa queues then check
+ * if all the queues that we're releasing frames from, combined, have:
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (single_sta_queue) {
+ int remaining = cnt;
+ int sleep_tx_count;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ n_queued = iwl_mvm_tid_queued(mvm, tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ sleep_tx_count = cnt - remaining;
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ mvmsta->sleep_tx_count = sleep_tx_count;
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
+ } else {
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
+ }
+
+ /* block the Tx queues until the FW updated the sleep Tx count */
+ iwl_trans_block_txq_ptrs(mvm->trans, true);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
+ CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ return;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, bool disable)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvm_sta->lock);
+
+ if (mvm_sta->disable_tx == disable) {
+ spin_unlock_bh(&mvm_sta->lock);
+ return;
+ }
+
+ mvm_sta->disable_tx = disable;
+
+ /* Tell mac80211 to start/stop queuing tx for this station */
+ ieee80211_sta_block_awake(mvm->hw, sta, disable);
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
+static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_int_sta *sta,
+ bool disable)
+{
+ u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta->sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Block/unblock all the stations of the given mvmvif */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvm_sta->mac_id_n_color !=
+ FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
+ continue;
+
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+ }
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return;
+
+	/* Also need to block/unblock the multicast station */
+ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->mcast_sta, disable);
+
+ /*
+ * Only unblock the broadcast station (FW blocks it for immediate
+ * quiet, not the driver)
+ */
+ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->bcast_sta, disable);
+}
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+
+ if (!WARN_ON(!mvmsta))
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+
+ rcu_read_unlock();
+}
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
+{
+ u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ /*
+	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ if (mvm->trans->cfg->gen2)
+ sn &= 0xff;
+
+ return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
new file mode 100644
index 000000000..0fc211108
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -0,0 +1,579 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "rs.h"
+
+struct iwl_mvm;
+struct iwl_mvm_vif;
+
+/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the iwlwifi
+ * driver to allow dynamic allocation of queues on-demand, rather than allocate
+ * them statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ * TXQ #2 - P2P device frames
+ * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ * TXQ #4 - BSS DATA frames queue
+ * TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ * TXQ #9 - P2P GO/SoftAP probe responses
+ * TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can it be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no free MGMT queue left to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no free queue is available to reserve, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the existing queue is only marked as aggregated via the ADD_STA
+ * command. Note, however, that a shared queue cannot be aggregated; only
+ * after the other TIDs become inactive and are removed can the queue be
+ * reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
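+
+/*
+ * Illustrative sketch of the allocation idea described above. This is
+ * not the driver's actual allocator and the helper name is made up; it
+ * only assumes the DATA pool bounds from %iwl_mvm_dqa_txq: prefer a
+ * free queue from the pool, otherwise the caller must share a queue.
+ */
+static inline int iwl_mvm_example_alloc_data_queue(unsigned long *used_queues)
+{
+	int q;
+
+	for (q = IWL_MVM_DQA_MIN_DATA_QUEUE;
+	     q <= IWL_MVM_DQA_MAX_DATA_QUEUE; q++)
+		if (!test_bit(q, used_queues))
+			return q;	/* free DATA queue found */
+
+	return -1;	/* pool exhausted - share an existing queue */
+}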
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP / GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the structure %iwl_mvm_sta
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
+ * This data includes the index of the station in the fw, per tid information
+ * (sequence numbers, Block-ack state machine, etc...). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map: %fw_id_to_mac_id that allows fetching a
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the tid related data in
+ * O(1) in time sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211_sta structure. This map helps to get that pointer quickly.
+ */
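+
+/*
+ * Sketch of the O(1) lookup described above (illustrative helper, not
+ * part of the driver API; assumes the RCU read lock is held by the
+ * caller, as it is in the Rx / Tx response / BA notification paths):
+ */
+static inline struct ieee80211_sta *
+iwl_mvm_example_sta_lookup(struct ieee80211_sta __rcu **fw_id_to_mac_id,
+			   u8 sta_id)
+{
+	struct ieee80211_sta *sta;
+
+	sta = rcu_dereference(fw_id_to_mac_id[sta_id]);
+	if (IS_ERR_OR_NULL(sta))	/* empty slot or internal station */
+		return NULL;
+
+	/* iwl_mvm_sta_from_mac80211() then yields the driver data */
+	return sta;
+}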
+
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops, which can sleep. The next paragraph explains
+ * the locking of a single station; the ones after it relate to the station
+ * table.
+ *
+ * The station holds the sequence number per tid. So this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine / and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well
+ * as from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of the %fw_id_to_mac_id map are serialized by the global mutex of
+ * the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
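+
+/*
+ * Sketch of the per-station locking pattern described above, as seen
+ * from the Tx path (illustrative only - %iwl_mvm_sta is defined
+ * further below):
+ *
+ *	spin_lock_bh(&mvmsta->lock);
+ *	seq_number = mvmsta->tid_data[tid].seq_number;
+ *	mvmsta->tid_data[tid].seq_number += 0x10;
+ *	spin_unlock_bh(&mvmsta->lock);
+ *
+ * The sequence number lives in bits 4-15, hence the 0x10 increment.
+ */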
+
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For these kinds of stations we have the %iwl_mvm_int_sta struct, which holds the
+ * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required,
+ * and no TID data is kept, as it is not needed either.
+ * One thing to note is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id,
+ * we fill ERR_PTR(-EINVAL) into this mapping, and all dereferences of
+ * pointers from this mapping need to check that the value is not an error
+ * or NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, initialized
+ * on init.
+ */
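+
+/*
+ * Sketch of the "reservation" described above (illustrative; the real
+ * allocation flow lives in sta.c):
+ *
+ *	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
+ *			   ERR_PTR(-EINVAL));
+ *
+ * Every reader must then check with IS_ERR_OR_NULL() before
+ * dereferencing a pointer taken from this map.
+ */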
+
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
+ * %fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
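+
+/*
+ * Sketch of the removal ordering described above (illustrative, error
+ * handling omitted): first update the MAC context as unassociated,
+ * only then remove the station and invalidate the index:
+ *
+ *	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ *	iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
+ *	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+ */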
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless of
+ * the station to which they were sent. We do that when we disassociate and
+ * before we remove the STA of the AP. The flush can be done synchronously
+ * against the fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates.
+ */
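+
+/*
+ * Sketch of the two flavors (illustrative calls; the zero flags value
+ * is an assumption - see the disassociation flow in mac80211.c for
+ * the real usage):
+ *
+ *	iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
+ *	iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ */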
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires resetting the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embedded in mac80211's %ieee80211_sta, that data
+ * will not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints that we must
+ * not allocate a new sta_id but reuse the previous one. This
+ * means that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls
+ * %iwl_mvm_up.
+ */
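+
+/*
+ * Sketch of the restart-aware allocation described above (illustrative;
+ * the real logic lives in iwl_mvm_add_sta() in sta.c):
+ *
+ *	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ *		sta_id = mvm_sta->sta_id;
+ *	else
+ *		sta_id = iwl_mvm_find_free_sta_id(mvm, vif->type);
+ */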
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know when there are frames in these queues so that it can
+ * properly handle trigger frames.
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge, since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames. The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with the PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other) cases, the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
+ */
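+
+/*
+ * Sketch: releasing buffered frames for a U-APSD service period as
+ * described above (illustrative values; mac80211's release-buffered-
+ * frames callback provides the real parameters):
+ *
+ *	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+ *					  IEEE80211_FRAME_RELEASE_UAPSD,
+ *					  num_frames, tids, more_data,
+ *					  single_sta_queue);
+ */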
+
+/**
+ * enum iwl_mvm_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be empty from packets for this RA/TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be empty from packets for this RA/TID.
+ */
+enum iwl_mvm_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_QUEUED,
+ IWL_AGG_STARTING,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
+ * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session / DQA
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @tx_time: medium time consumed by this A-MPDU
+ * @is_tid_active: has this TID sent traffic in the last
+ * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
+ * field should be ignored.
+ * @tpt_meas_start: time of the throughput measurements start, is reset every HZ
+ * @tx_count_last: number of frames transmitted during the last second
+ * @tx_count: counts the number of frames transmitted since the last reset of
+ * tpt_meas_start
+ */
+struct iwl_mvm_tid_data {
+ struct sk_buff_head deferred_tx_frames;
+ u16 seq_number;
+ u16 next_reclaimed;
+ /* The rest is Tx AGG related */
+ u32 rate_n_flags;
+ u8 lq_color;
+ bool amsdu_in_ampdu_allowed;
+ enum iwl_mvm_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ u16 tx_time;
+ bool is_tid_active;
+ unsigned long tpt_meas_start;
+ u32 tx_count_last;
+ u32 tx_count;
+};
+
+struct iwl_mvm_key_pn {
+ struct rcu_head rcu_head;
+ struct {
+ u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+ } ____cacheline_aligned_in_smp q[];
+};
+
+struct iwl_mvm_delba_data {
+ u32 baid;
+} __packed;
+
+struct iwl_mvm_delba_notif {
+ struct iwl_mvm_internal_rxq_notif metadata;
+ struct iwl_mvm_delba_data delba;
+} __packed;
+
+/**
+ * struct iwl_mvm_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwl_mvm_rxq_dup_data {
+ __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+ u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ * tid.
+ * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @sta_type: station type
+ * @sta_state: station state according to enum %ieee80211_sta_state
+ * @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
+ * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
+ * and from Tx response flow, it needs a spinlock.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ * the driver - %rs_drv or in the FW - %rs_fw.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ * such queue can be guaranteed, the STA addition will fail.
+ * @tx_protection: reference counter for controlling the Tx protection.
+ * @tt_tx_protection: is Tx protection enabled due to thermal throttling?
+ * @disable_tx: is tx to this STA disabled?
+ * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
+ * In case TLC offload is not active it is either 0xFFFF or 0.
+ * @max_amsdu_len: max AMSDU length
+ * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
+ * @sleep_tx_count: the number of frames that we told the firmware to let out
+ * even when that station is asleep. This is useful in case the queue
+ * gets empty before all the frames were sent, which can happen when
+ * we are sending frames from an AMPDU queue and there was a hole in
+ * the BA window. To be used for UAPSD only.
+ * @ptk_pn: per-queue PTK PN data structures
+ * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+ u32 sta_id;
+ u32 tfd_queue_msk;
+ u32 mac_id_n_color;
+ u16 tid_disable_agg;
+ u16 max_agg_bufsize;
+ enum iwl_sta_type sta_type;
+ enum ieee80211_sta_state sta_state;
+ bool bt_reduced_txpower;
+ bool next_status_eosp;
+ spinlock_t lock;
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
+ union {
+ struct iwl_lq_sta_rs_fw rs_fw;
+ struct iwl_lq_sta rs_drv;
+ } lq_sta;
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+ struct iwl_mvm_rxq_dup_data *dup_data;
+
+ u16 deferred_traffic_tid_map;
+
+ u8 reserved_queue;
+
+ /* Temporary, until the new TLC will control the Tx protection */
+ s8 tx_protection;
+ bool tt_tx_protection;
+
+ bool disable_tx;
+ u16 amsdu_enabled;
+ u16 max_amsdu_len;
+ bool sleeping;
+ u8 agg_tids;
+ u8 sleep_tx_count;
+ u8 avg_energy;
+};
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
+ * @tfd_queue_msk: the tfd queues used by the station
+ */
+struct iwl_mvm_int_sta {
+ u32 sta_id;
+ enum iwl_sta_type type;
+ u32 tfd_queue_msk;
+};
+
+/**
+ * iwl_mvm_sta_send_to_fw - send the STA info to the FW
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ * about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ * from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update, unsigned int flags);
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+ bool amsdu);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start);
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type);
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt, u16 tids, bool more_data,
+ bool single_sta_queue);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain);
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, bool disable);
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable);
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable);
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force);
+
+#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
new file mode 100644
index 000000000..67f360c0d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -0,0 +1,739 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include "mvm.h"
+#include "time-event.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+#define TU_TO_US(x) ((x) * 1024)
+#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
+
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+ GFP_KERNEL);
+ }
+}
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int count = 0;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ if (vif) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+ }
+
+ count++;
+ }
+
+ return count;
+}
+
+static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_tdls_config_res *resp;
+ struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = TDLS_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &tdls_cfg_cmd, },
+ .len = { sizeof(struct iwl_tdls_config_cmd), },
+ };
+ struct ieee80211_sta *sta;
+ int ret, i, cnt;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ tdls_cfg_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+
+ /* for now the Tx cmd is empty and unused */
+
+ /* populate TDLS peer data */
+ cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
+ IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+ tdls_cfg_cmd.sta_info[cnt].is_initiator =
+ cpu_to_le32(sta->tdls_initiator ? 1 : 0);
+
+ cnt++;
+ }
+
+ tdls_cfg_cmd.tdls_peer_count = cnt;
+ IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (WARN_ON_ONCE(ret))
+ return;
+
+ pkt = cmd.resp_pkt;
+
+ WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
+
+ /* we don't really care about the response at this point */
+
+ iwl_free_resp(&cmd);
+}
+
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool sta_added)
+{
+ int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
+
+ /* when the first peer joins, send a power update first */
+ if (tdls_sta_cnt == 1 && sta_added)
+ iwl_mvm_power_update_mac(mvm);
+
+	/*
+	 * Configure the FW with TDLS peer info only if the TDLS channel switch
+	 * capability is set.
+	 * TDLS config data is currently used only in the TDLS channel switch
+	 * code. It is also supposed to serve the TDLS buffer station, which is
+	 * not yet implemented in FW.
+	 */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
+ iwl_mvm_tdls_config(mvm, vif);
+
+ /* when the last peer leaves, send a power update last */
+ if (tdls_sta_cnt == 0 && !sta_added)
+ iwl_mvm_power_update_mac(mvm);
+}
+
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+
+ /*
+ * iwl_mvm_protect_session() reads directly from the device
+ * (the system time), so make sure it is available.
+ */
+ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
+ return;
+
+ mutex_lock(&mvm->mutex);
+ /* Protect the session to hear the TDLS setup response on the channel */
+ iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
+}
+
+static const char *
+iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
+{
+ switch (state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ return "IDLE";
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ return "REQ SENT";
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
+ return "RESP RECEIVED";
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ return "REQ RECEIVED";
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ return "ACTIVE";
+ }
+
+ return NULL;
+}
+
+static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
+ enum iwl_mvm_tdls_cs_state state)
+{
+ if (mvm->tdls_cs.state == state)
+ return;
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+ iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
+ iwl_mvm_tdls_cs_state_str(state));
+ mvm->tdls_cs.state = state;
+
+ /* we only send requests to our switching peer - update sent time */
+ if (state == IWL_MVM_TDLS_SW_REQ_SENT)
+ mvm->tdls_cs.peer.sent_timestamp =
+ iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
+ if (state == IWL_MVM_TDLS_SW_IDLE)
+ mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
+}
+
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ unsigned int delay;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* the channel switch can fail - a zero status indicates failure */
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+ return;
+ }
+
+ if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+ return;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+
+ /*
+ * Update state and possibly switch again after this is over (DTIM).
+ * Also convert TU to msec.
+ */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+}
+
+static int
+iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator, u32 timestamp)
+{
+ bool same_peer = false;
+ int ret = 0;
+
+ /* get the existing peer if it's there */
+ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+ struct ieee80211_sta *sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta))
+ same_peer = ether_addr_equal(peer, sta->addr);
+ }
+
+ switch (mvm->tdls_cs.state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ /*
+ * might be spurious packet from the peer after the switch is
+ * already done
+ */
+ if (type == TDLS_MOVE_CH)
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ /* only allow requests from the same peer */
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
+ !peer_initiator)
+ /*
+ * We received a ch-switch request while an outgoing
+ * one is pending. Allow it if the peer is the link
+ * initiator.
+ */
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_REQ)
+ /* wait for idle before sending another request */
+ ret = -EBUSY;
+ else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+ /* we got a stale response - ignore it */
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
+ /*
+ * we are waiting for the FW to give an "active" notification,
+ * so ignore requests in the meantime
+ */
+ ret = -EBUSY;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ /* as above, allow the link initiator to proceed */
+ if (type == TDLS_SEND_CHAN_SW_REQ) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (peer_initiator) /* they are the initiator */
+ ret = -EBUSY;
+ } else if (type == TDLS_MOVE_CH) {
+ ret = -EINVAL;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ /*
+ * the only valid request when active is a request to return
+ * to the base channel by the current off-channel peer
+ */
+ if (type != TDLS_MOVE_CH || !same_peer)
+ ret = -EBUSY;
+ break;
+ }
+
+ if (ret)
+ IWL_DEBUG_TDLS(mvm,
+ "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+ type, mvm->tdls_cs.state, peer, same_peer,
+ peer_initiator);
+
+ return ret;
+}
+
+static int
+iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator,
+ u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ u32 timestamp, u16 switch_time,
+ u16 switch_timeout, struct sk_buff *skb,
+ u32 ch_sw_tm_ie)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct iwl_tdls_channel_switch_cmd cmd = {0};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
+ timestamp);
+ if (ret)
+ return ret;
+
+ if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd.switch_type = type;
+ cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
+ cmd.timing.switch_time = cpu_to_le32(switch_time);
+ cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ ret = -ENOENT;
+ goto out;
+ }
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
+
+ if (!chandef) {
+ if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.peer.chandef.chan) {
+ /* actually moving to the channel */
+ chandef = &mvm->tdls_cs.peer.chandef;
+ } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
+ type == TDLS_MOVE_CH) {
+ /* we need to return to base channel */
+ struct ieee80211_chanctx_conf *chanctx =
+ rcu_dereference(vif->chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ chandef = &chanctx->def;
+ }
+ }
+
+ if (chandef) {
+ cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+ cmd.ci.channel = chandef->chan->hw_value;
+ cmd.ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+ }
+
+ /* keep quota calculation simple for now - 50% of DTIM for TDLS */
+ cmd.timing.max_offchan_duration =
+ cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int) / 2);
+
+	/*
+	 * Switch time is the first field in the switch-timing IE; the +2
+	 * skips the two-byte IE header (id and length).
+	 */
+ cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+ info = IEEE80211_SKB_CB(skb);
+ hdr = (void *)skb->data;
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+ }
+
+ iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+ mvmsta->sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+ hdr->frame_control);
+ rcu_read_unlock();
+
+ memcpy(cmd.frame.data, skb->data, skb->len);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
+ ret);
+ goto out;
+ }
+
+ /* channel switch has started, update state */
+ if (type != TDLS_MOVE_CH) {
+ mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
+ iwl_mvm_tdls_update_cs_state(mvm,
+ type == TDLS_SEND_CHAN_SW_REQ ?
+ IWL_MVM_TDLS_SW_REQ_SENT :
+ IWL_MVM_TDLS_SW_REQ_RCVD);
+ } else {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
+ }
+
+out:
+
+ /* channel switch failed - we are idle */
+ if (ret)
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ return ret;
+}
+
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
+{
+ struct iwl_mvm *mvm;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ unsigned int delay;
+ int ret;
+
+ mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+ mutex_lock(&mvm->mutex);
+
+ /* called after an active channel switch has finished or timed-out */
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ /* station might be gone, in that case do nothing */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
+ goto out;
+
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr,
+ mvm->tdls_cs.peer.initiator,
+ mvm->tdls_cs.peer.op_class,
+ &mvm->tdls_cs.peer.chandef,
+ 0, 0, 0,
+ mvm->tdls_cs.peer.skb,
+ mvm->tdls_cs.peer.ch_sw_tm_ie);
+ if (ret)
+ IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+
+ /* retry after a DTIM if we failed sending now */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+out:
+ mutex_unlock(&mvm->mutex);
+}
+
+int
+iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ unsigned int delay;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
+ sta->addr, chandef->chan->center_freq, chandef->width);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
+ IWL_DEBUG_TDLS(mvm,
+ "Existing peer. Can't start switch with %pM\n",
+ sta->addr);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr, sta->tdls_initiator,
+ oper_class, chandef, 0, 0, 0,
+ tmpl_skb, ch_sw_tm_ie);
+ if (ret)
+ goto out;
+
+ /*
+ * Mark the peer as "in tdls switch" for this vif. We only allow a
+ * single such peer per vif.
+ */
+ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+ if (!mvm->tdls_cs.peer.skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
+ mvm->tdls_cs.peer.chandef = *chandef;
+ mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+ mvm->tdls_cs.peer.op_class = oper_class;
+ mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+
+ /*
+ * Wait for 2 DTIM periods before attempting the next switch. The next
+ * switch will be made sooner if the current one completes before that.
+ */
+ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *cur_sta;
+ bool wait_for_phy = false;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
+ IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+ goto out;
+ }
+
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* make sure it's the same peer */
+ if (cur_sta != sta)
+ goto out;
+
+ /*
+ * If we're currently in a switch because of the now canceled peer,
+ * wait a DTIM here to make sure the phy is back on the base channel.
+ * We can't otherwise force it.
+ */
+ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+ mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
+ wait_for_phy = true;
+
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+ dev_kfree_skb(mvm->tdls_cs.peer.skb);
+ mvm->tdls_cs.peer.skb = NULL;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ /* make sure the phy is on the base channel */
+ if (wait_for_phy)
+ msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int));
+
+ /* flush the channel switch state */
+ flush_delayed_work(&mvm->tdls_cs.dwork);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+}
+
+void
+iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ enum iwl_tdls_channel_switch_type type;
+ unsigned int delay;
+ const char *action_str =
+ params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
+ "REQ" : "RESP";
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm,
+ "Received TDLS ch switch action %s from %pM status %d\n",
+ action_str, params->sta->addr, params->status);
+
+ /*
+ * we got a non-zero status from a peer we were switching to - move to
+	 * the idle state and retry later
+ */
+ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
+ params->status != 0 &&
+ mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+ struct ieee80211_sta *cur_sta;
+
+ /* make sure it's the same peer */
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (cur_sta == params->sta) {
+ iwl_mvm_tdls_update_cs_state(mvm,
+ IWL_MVM_TDLS_SW_IDLE);
+ goto retry;
+ }
+ }
+
+ type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
+
+ iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
+ params->sta->tdls_initiator, 0,
+ params->chandef, params->timestamp,
+ params->switch_time,
+ params->switch_timeout,
+ params->tmpl_skb,
+ params->ch_sw_tm_ie);
+
+retry:
+ /* register a timeout in case we don't succeed in switching */
+ delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
+ 1024 / 1000;
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+ mutex_unlock(&mvm->mutex);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000..cbbc16fd0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
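+
+/*
+ * Illustrative sketch (not part of this header): a testmode handler is
+ * expected to parse NL80211_ATTR_TESTDATA with a policy over these
+ * attributes and dispatch on IWL_MVM_TM_ATTR_CMD, roughly like:
+ *
+ *	static const struct nla_policy tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+ *		[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+ *		[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+ *		[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+ *	};
+ *
+ *	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+ *	case IWL_MVM_TM_CMD_SET_NOA:
+ *		... program the NoA using IWL_MVM_TM_ATTR_NOA_DURATION ...
+ *	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+ *		... toggle it using IWL_MVM_TM_ATTR_BEACON_FILTER_STATE ...
+ *	}
+ */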
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
new file mode 100644
index 000000000..cd91bc442
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -0,0 +1,921 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/*
+ * For the high priority TE use a time event type that has similar priority to
+ * the FW's action scan priority.
+ */
+#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
+#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ if (!te_data->vif)
+ return;
+
+ list_del(&te_data->list);
+ te_data->running = false;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ te_data->vif = NULL;
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ /*
+	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware
+ * (which is handled in iwl_mvm_te_handle_notif).
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+
+ synchronize_net();
+
+ /*
+ * Flush the offchannel queue -- this is called when the time
+ * event finishes or is canceled, so that frames queued for it
+ * won't get stuck on the queue and be transmitted in the next
+ * time event.
+ * We have to send the command asynchronously since this cannot
+ * be under the mutex for locking reasons, but that's not an
+ * issue as it will have to complete before the next command is
+ * executed, and a new time event means a new command.
+ */
+ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+ /* Do the same for the P2P device queue (STA) */
+ if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ struct iwl_mvm_vif *mvmvif;
+
+ /*
+ * NB: access to this pointer would be racy, but the flush bit
+ * can only be set when we had a P2P-Device VIF, and we have a
+ * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+ * not really racy.
+ */
+
+ if (!WARN_ON(!mvm->p2p_device_vif)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
+ CMD_ASYNC);
+ }
+ }
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+ /*
+ * Of course, our status bit is just as racy as mac80211, so in
+ * addition, fire off the work struct which will drop all frames
+ * from the hardware queues that made it through the race. First
+ * it will of course synchronize the TX path to make sure that
+ * any *new* TX will be rejected.
+ */
+ schedule_work(&mvm->roc_done_wk);
+}
+
+static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *csa_vif;
+
+ rcu_read_lock();
+
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (!csa_vif || !csa_vif->csa_active)
+ goto out_unlock;
+
+ IWL_DEBUG_TE(mvm, "CSA NOA started\n");
+
+ /*
+ * CSA NoA is started but we still have beacons to
+ * transmit on the current channel.
+ * So we just do nothing here and the switch
+ * will be performed on the last TBTT.
+ */
+ if (!ieee80211_csa_is_complete(csa_vif)) {
+ IWL_WARN(mvm, "CSA NOA started too early\n");
+ goto out_unlock;
+ }
+
+ ieee80211_csa_finish(csa_vif);
+
+ rcu_read_unlock();
+
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+ return;
+
+out_unlock:
+ rcu_read_unlock();
+}
+
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+
+ if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
+ vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+
+ iwl_mvm_connection_loss(mvm, vif, errmsg);
+ return true;
+}
+
+static void
+iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ struct ieee80211_vif *vif = te_data->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!notif->status)
+ IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_AP:
+ if (!notif->status)
+ mvmvif->csa_failed = true;
+ iwl_mvm_csa_noa_start(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!notif->status) {
+ iwl_mvm_connection_loss(mvm, vif,
+ "CSA TE failed to start");
+ break;
+ }
+ iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ ieee80211_chswitch_done(te_data->vif, true);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* we don't need it anymore */
+ iwl_mvm_te_clear_data(mvm, te_data);
+}
+
+static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_time_event_notif *notif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_time_event *te_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
+ te_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(te_data->vif),
+ trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+ u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+ u32 trig_action_bitmap =
+ le32_to_cpu(te_trig->time_events[i].action_bitmap);
+ u32 trig_status_bitmap =
+ le32_to_cpu(te_trig->time_events[i].status_bitmap);
+
+ if (trig_te_id != te_data->id ||
+ !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+ !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Time event %d Action 0x%x received status: %d",
+ te_data->id,
+ le32_to_cpu(notif->action),
+ le32_to_cpu(notif->status));
+ break;
+ }
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding to the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+ /*
+ * The FW sends the start/end time event notifications even for events
+ * that it fails to schedule. This is indicated in the status field of
+	 * the notification. This happens in cases where the scheduler cannot
+	 * find a schedule that can handle the event (for example, requesting
+	 * P2P Device discoverability while there are other higher priority
+	 * events in the system).
+ */
+ if (!le32_to_cpu(notif->status)) {
+ const char *msg;
+
+ if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
+ msg = "Time Event start notification failure";
+ else
+ msg = "Time Event end notification failure";
+
+ IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
+
+ if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
+ IWL_DEBUG_TE(mvm,
+ "TE ended - current time %lu, estimated end %lu\n",
+ jiffies, te_data->end_jiffies);
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No beacon heard and the time event is over already...");
+ break;
+ default:
+ break;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
+ te_data->running = true;
+ te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
+ ieee80211_ready_on_channel(mvm->hw);
+ } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
+ }
+ } else {
+ IWL_WARN(mvm, "Got TE with unknown action\n");
+ }
+}
+
+/*
+ * Handle an Aux ROC time event
+ */
+static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_time_event_notif *notif)
+{
+ struct iwl_mvm_time_event_data *te_data, *tmp;
+ bool aux_roc_te = false;
+
+ list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+ aux_roc_te = true;
+ break;
+ }
+ }
+	if (!aux_roc_te) /* Not an Aux ROC time event */
+ return -EINVAL;
+
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+ IWL_DEBUG_TE(mvm,
+ "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action), le32_to_cpu(notif->status));
+
+ if (!le32_to_cpu(notif->status) ||
+ le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+ /* End TE, notify mac80211 */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm); /* flush aux queue */
+ list_del(&te_data->list); /* remove from list */
+ te_data->running = false;
+ te_data->vif = NULL;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ te_data->running = true;
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ } else {
+ IWL_DEBUG_TE(mvm,
+ "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
+ le32_to_cpu(notif->action));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_event_notif *notif = (void *)pkt->data;
+ struct iwl_mvm_time_event_data *te_data, *tmp;
+
+ IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ /* This time event is triggered for Aux ROC request */
+ if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
+ goto unlock;
+
+ list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid)
+ iwl_mvm_te_handle_notif(mvm, te_data, notif);
+ }
+unlock:
+ spin_unlock_bh(&mvm->time_event_lock);
+}
+
+static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_notif *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ /* te_data->uid is already set in the TIME_EVENT_CMD response */
+ if (le32_to_cpu(resp->unique_id) != te_data->uid)
+ return false;
+
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
+ te_data->uid);
+ if (!resp->status)
+ IWL_ERR(mvm,
+ "TIME_EVENT_NOTIFICATION received but not executed\n");
+
+ return true;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_resp *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ /* we should never get a response to another TIME_EVENT_CMD here */
+ if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
+ return false;
+
+ te_data->uid = le32_to_cpu(resp->unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+ return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_cmd *te_cmd)
+{
+ static const u16 time_event_response[] = { TIME_EVENT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
+ le32_to_cpu(te_cmd->duration));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (WARN_ON(te_data->id != TE_MAX)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+ te_data->vif = vif;
+ te_data->duration = le32_to_cpu(te_cmd->duration);
+ te_data->id = le32_to_cpu(te_cmd->id);
+ list_add_tail(&te_data->list, &mvm->time_event_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already be processed unsuccessfully.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_time_event_response, te_data);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(*te_cmd), te_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(ret);
+
+ if (ret) {
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+ return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ u32 max_delay, bool wait_for_notif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+ struct iwl_notification_wait wait_te_notif;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ return;
+ }
+
+ if (te_data->running) {
+ IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+ te_data->uid,
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ /*
+		 * We don't have enough time; cancel the current TE and issue
+		 * a new one. Of course it would be better to remove the old
+		 * one only when the new one is added, but we don't care if we
+		 * are off channel for a bit. All we need to do is not return
+		 * before we actually begin to be on the channel.
+ */
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+ time_cmd.apply_time = cpu_to_le32(0);
+
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
+ time_cmd.max_delay = cpu_to_le32(max_delay);
+	/* TODO: why do we need an interval if the event is not periodic? */
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END |
+ TE_V2_START_IMMEDIATELY);
+
+ if (!wait_for_notif) {
+ iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+ return;
+ }
+
+ /*
+ * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
+ * right after we send the time event
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
+ te_notif_response,
+ ARRAY_SIZE(te_notif_response),
+ iwl_mvm_te_notif, te_data);
+
+ /* If TE was sent OK - wait for the notification that started */
+ if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
+ IWL_ERR(mvm, "Failed to add TE to protect session\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
+ TU_TO_JIFFIES(max_delay))) {
+ IWL_ERR(mvm, "Failed to protect session until TE\n");
+ }
+}
+
+static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ u32 *uid)
+{
+ u32 id;
+
+ /*
+ * It is possible that by the time we got to this point the time
+ * event was already removed.
+ */
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /* Save time event uid before clearing its data */
+ *uid = te_data->uid;
+ id = te_data->id;
+
+ /*
+ * The clear_data function handles time events that were already removed
+ */
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * It is possible that by the time we try to remove it, the time event
+	 * has already ended and been removed. In such a case there is no need to
+ * send a removal command.
+ */
+ if (id == TE_MAX) {
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Explicit request to remove an aux ROC time event. The removal of a time
+ * event needs to be synchronized with the flow of a time event's end
+ * notification, which also removes the time event from the op mode
+ * data structures.
+ */
+static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_hs20_roc_req aux_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ aux_cmd.event_unique_id = cpu_to_le32(uid);
+ aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ aux_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
+ le32_to_cpu(aux_cmd.event_unique_id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
+ sizeof(aux_cmd), &aux_cmd);
+
+ if (WARN_ON(ret))
+ return;
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ /* When we remove a TE, the UID is to be set in the id field */
+ time_cmd.id = cpu_to_le32(uid);
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(time_cmd), &time_cmd);
+ if (WARN_ON(ret))
+ return;
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ u32 id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+ IWL_DEBUG_TE(mvm,
+ "don't remove TE with id=%u (not session protection)\n",
+ id);
+ return;
+ }
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration, enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+ if (te_data->running) {
+ IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+ return -EBUSY;
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ time_cmd.apply_time = cpu_to_le32(0);
+ time_cmd.interval = cpu_to_le32(1);
+
+ /*
+ * The P2P Device TEs can have lower priority than other events
+ * that are being scheduled by the driver/fw, and thus it might not be
+ * scheduled. To improve the chances of it being scheduled, allow them
+ * to be fragmented, and in addition allow them to be delayed.
+ */
+ time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
+ time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
+ time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END |
+ TE_V2_START_IMMEDIATELY);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_time_event_data *te_data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /*
+ * Iterate over the list of time events and find the time event that is
+ * associated with a P2P_DEVICE interface.
+ * This assumes that a P2P_DEVICE interface can have only a single time
+	 * event at any given time, and this time event corresponds to a ROC
+ * request
+ */
+ list_for_each_entry(te_data, &mvm->time_event_list, list) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ goto out;
+ }
+
+	/* There can be at most one AUX ROC time event; we just use the
+	 * list to simplify/unify the code. Remove it if it exists.
+ */
+ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+ struct iwl_mvm_time_event_data,
+ list);
+out:
+ spin_unlock_bh(&mvm->time_event_lock);
+ return te_data;
+}
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_time_event_data *te_data;
+ u32 uid;
+
+ te_data = iwl_mvm_get_roc_te(mvm);
+ if (te_data)
+ __iwl_mvm_remove_time_event(mvm, te_data, &uid);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_time_event_data *te_data;
+
+ te_data = iwl_mvm_get_roc_te(mvm);
+ if (!te_data) {
+ IWL_WARN(mvm, "No remain on channel event\n");
+ return;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ } else {
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+ }
+
+ iwl_mvm_roc_finished(mvm);
+}
+
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 apply_time)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running) {
+ u32 id;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id == TE_CHANNEL_SWITCH_PERIOD) {
+ IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Remove the session protection time event to allow the
+ * channel switch. If we got here, we just heard a beacon so
+ * the session protection is not needed anymore anyway.
+ */
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
+ time_cmd.apply_time = cpu_to_le32(apply_time);
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = 1;
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_ABSENCE);
+ if (!apply_time)
+ time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
new file mode 100644
index 000000000..3d2e8b615
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -0,0 +1,250 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+/**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course, the API provides various options that allow control over the
+ * parameters of the flow:
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If yes what is the maximal number of chunks?
+ * etc...
+ */
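+
+/*
+ * A condensed sketch of that flow as driven by the helpers in time-event.c
+ * (illustrative only; error handling omitted):
+ *
+ *	1) + 2) send TIME_EVENT_CMD; the response handler records the UID:
+ *	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ *				   time_event_response,
+ *				   ARRAY_SIZE(time_event_response),
+ *				   iwl_mvm_time_event_response, te_data);
+ *	iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(cmd), &cmd);
+ *	iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ *
+ *	3) the start/end TIME_EVENT_NOTIFICATION then arrives asynchronously
+ *	and is matched against te_data->uid in iwl_mvm_rx_time_event_notif().
+ */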
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events to the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ * in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
+ * @wait_for_notif: true if it is required that a time event notification be
+ * waited for (that the time event has been scheduled before returning)
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration TU. This function
+ * can block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ u32 max_delay, bool wait_for_notif);
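+
+/*
+ * Example use around association (a sketch; the caller must hold
+ * mvm->mutex, and the max_delay of 500 TU here is illustrative):
+ *
+ *	iwl_mvm_protect_session(mvm, vif,
+ *		MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS),
+ *		MSEC_TO_TU(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS),
+ *		500, false);
+ */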
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship: if the protection is not needed any more, it should be
+ * canceled, because the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ * @type: the remain on channel request type
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration, enum ieee80211_roc_type type);
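+
+/*
+ * Sketch of the expected call context (illustrative; the real caller is
+ * the mac80211 remain-on-channel op, holding mvm->mutex):
+ *
+ *	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration,
+ *				    IEEE80211_ROC_TYPE_NORMAL);
+ *
+ * On the TE start notification the driver calls ieee80211_ready_on_channel();
+ * when the TE ends it calls ieee80211_remain_on_channel_expired().
+ */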
+
+/**
+ * iwl_mvm_stop_roc - stop remain on channel functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up a time event
+ * @mvm: the mvm component
+ * @mvmvif: the mvm vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless its type.
+ * It is useful for cleaning up time events running before removing an
+ * interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal; it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm);
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+/**
+ * iwl_mvm_schedule_csa_period - request channel switch absence period
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the channel switch is issued
+ * @duration: the duration of the NoA in TU.
+ * @apply_time: NoA start time in GP2.
+ *
+ * This function schedules a NoA time event and is used as part of the
+ * channel switch flow.
+ */
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 apply_time);
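+
+/*
+ * Sketch of intended use during a channel switch (illustrative; apply_time
+ * is a GP2 timestamp the caller derives from the switching beacon):
+ *
+ *	ret = iwl_mvm_schedule_csa_period(mvm, vif,
+ *					  vif->bss_conf.beacon_int,
+ *					  apply_time);
+ */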
+
+/**
+ * iwl_mvm_te_scheduled - check if the fw received the TE cmd
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function returns true iff this TE is added to the fw.
+ */
+static inline bool
+iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
+{
+ if (!te_data)
+ return false;
+
+ return !!te_data->uid;
+}
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
new file mode 100644
index 000000000..2d0b8a391
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -0,0 +1,310 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw/api/tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+
+ tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (IWL_MVM_TOF_IS_RESPONDER) {
+ tof_data->responder_cfg.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+ tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
+ }
+#endif
+
+ tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+ tof_data->range_req.req_timeout = 1;
+ tof_data->range_req.initiator = 1;
+ tof_data->range_req.report_policy = 3;
+
+ tof_data->range_req_ext.sub_grp_cmd_id =
+ cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
+ !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
+ return;
+
+ memset(tof_data, 0, sizeof(*tof_data));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *enabled = _data;
+
+	/* a non-BSS vif exists */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+	bool enabled = true; /* cleared by the iterator for non-BSS vifs */
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_tof_iterator, &enabled);
+ if (!enabled) {
+ IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+ return -EINVAL;
+ }
+
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+ struct iwl_tof_range_abort_cmd cmd = {
+ .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+ .request_id = id,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+ id, mvm->tof_data.active_range_request);
+ return -EINVAL;
+ }
+
+ /* after abort is sent there's no active request anymore */
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+ !mvmvif->ap_ibss_active) {
+ IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+ return -EIO;
+ }
+
+ cmd->sta_id = mvmvif->bcast_sta.sta_id;
+ memcpy(cmd->bssid, vif->addr, ETH_ALEN);
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .len = { sizeof(mvm->tof_data.range_req), },
+ /* no copy because of the command size */
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+ return -EIO;
+ }
+
+ /* nesting of range requests is not supported in FW */
+ if (mvm->tof_data.active_range_request !=
+ IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+ IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+ mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+ cmd.data[0] = &mvm->tof_data.range_req;
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+ return -EINVAL;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+ return -EIO;
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+ IWL_ALWAYS_LONG_GROUP, 0),
+ 0, sizeof(mvm->tof_data.range_req_ext),
+ &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+ if (resp->request_id != mvm->tof_data.active_range_request) {
+ IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+ resp->request_id, mvm->tof_data.active_range_request);
+ return -EIO;
+ }
+
+ memcpy(&mvm->tof_data.range_resp, resp,
+ sizeof(struct iwl_tof_range_rsp_ntfy));
+ mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+ return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+ IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+ return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+ struct iwl_tof_neighbor_report *report =
+ (struct iwl_tof_neighbor_report *)data;
+
+ IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+ report->bssid, report->request_token, report->status);
+ return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+ case TOF_RANGE_RESPONSE_NOTIF:
+ iwl_mvm_tof_range_resp(mvm, resp->data);
+ break;
+ case TOF_MCSI_DEBUG_NOTIF:
+ iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+ break;
+ case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+ iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+ break;
+ default:
+ IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+ resp->sub_grp_cmd_id);
+ break;
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
new file mode 100644
index 000000000..2ff560aa1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw/api/tof.h"
+
+struct iwl_mvm_tof_data {
+ struct iwl_tof_config_cmd tof_cfg;
+ struct iwl_tof_range_req_cmd range_req;
+ struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+ struct iwl_tof_range_rsp_ntfy range_resp;
+ u8 last_abort_id;
+ u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
new file mode 100644
index 000000000..319103f4b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -0,0 +1,909 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/sort.h>
+
+#include "mvm.h"
+
+#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
+
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+ u32 duration = tt->params.ct_kill_duration;
+
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ IWL_ERR(mvm, "Enter CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, true);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+ }
+
+ /* Don't schedule an exit work if we're in test mode, since
+ * the temperature will not change unless we manually set it
+ * again (or disable testing).
+ */
+ if (!mvm->temperature_test)
+ schedule_delayed_work(&tt->ct_kill_exit,
+ round_jiffies_relative(duration * HZ));
+}
+
+static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
+{
+ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ IWL_ERR(mvm, "Exit CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, false);
+}
+
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+{
+ /* ignore the notification if we are in test mode */
+ if (mvm->temperature_test)
+ return;
+
+ if (mvm->temperature == temp)
+ return;
+
+ mvm->temperature = temp;
+ iwl_mvm_tt_handler(mvm);
+}
+
+static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_dts_measurement_notif_v1 *notif_v1;
+ int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
+
+ /* we can use notif_v1 only, because v2 only adds an additional
+ * parameter, which is not used in this function.
+ */
+ if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
+ IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+ return -EINVAL;
+ }
+
+ notif_v1 = (void *)pkt->data;
+
+ temp = le32_to_cpu(notif_v1->temp);
+
+ /* shouldn't be negative, but since it's s32, make sure it isn't */
+ if (WARN_ON_ONCE(temp < 0))
+ temp = 0;
+
+ IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
+
+ return temp;
+}
+
+static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ int *temp = data;
+ int ret;
+
+ ret = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (ret < 0)
+ return true;
+
+ *temp = ret;
+
+ return true;
+}
+
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_dts_measurement_notif_v2 *notif_v2;
+ int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
+ u32 ths_crossed;
+
+ /* the notification is handled synchronously in ctkill, so skip here */
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ temp = iwl_mvm_temp_notif_parse(mvm, pkt);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ if (temp >= 0)
+ iwl_mvm_tt_temp_changed(mvm, temp);
+ return;
+ }
+
+ if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
+ IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+ return;
+ }
+
+ notif_v2 = (void *)pkt->data;
+ ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
+
+ /* 0xFF in ths_crossed means the notification is not related
+ * to a trip, so we can ignore it here.
+ */
+ if (ths_crossed == 0xFF)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
+ temp, ths_crossed);
+
+#ifdef CONFIG_THERMAL
+ if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
+ return;
+
+ if (mvm->tz_device.tzone) {
+ struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
+
+ thermal_notify_framework(tz_dev->tzone,
+ tz_dev->fw_trips_index[ths_crossed]);
+ }
+#endif /* CONFIG_THERMAL */
+}
+
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct ct_kill_notif *notif;
+ int len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON_ONCE(len != sizeof(*notif))) {
+ IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
+ return;
+ }
+
+ notif = (struct ct_kill_notif *)pkt->data;
+ IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
+ notif->temperature);
+
+ iwl_mvm_enter_ctkill(mvm);
+}
+
+static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_dts_measurement_cmd cmd = {
+ .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
+ };
+ struct iwl_ext_dts_measurement_cmd extcmd = {
+ .control_mode = cpu_to_le32(DTS_AUTOMATIC),
+ };
+ u32 cmdid;
+
+ cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ PHY_OPS_GROUP, 0);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
+ return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
+}
+
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
+{
+ struct iwl_notification_wait wait_temp_notif;
+ static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+ DTS_MEASUREMENT_NOTIF_WIDE) };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
+ temp_notif, ARRAY_SIZE(temp_notif),
+ iwl_mvm_temp_notif_wait, temp);
+
+ ret = iwl_mvm_get_temp_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
+ iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
+ IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
+ if (ret)
+ IWL_ERR(mvm, "Getting the temperature timed out\n");
+
+ return ret;
+}
+
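+/* Usage note (illustrative, not in the original source): callers hold
+ * mvm->mutex and receive the temperature through the notification-wait
+ * callback above; on a timeout *temp is left untouched, which is why
+ * check_exit_ctkill() below only trusts it when the return value is 0.
+ */
+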
+static void check_exit_ctkill(struct work_struct *work)
+{
+ struct iwl_mvm_tt_mgmt *tt;
+ struct iwl_mvm *mvm;
+ u32 duration;
+ s32 temp;
+ int ret;
+
+ tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
+ mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+ iwl_mvm_exit_ctkill(mvm);
+
+ return;
+ }
+
+ duration = tt->params.ct_kill_duration;
+
+ mutex_lock(&mvm->mutex);
+
+ if (__iwl_mvm_mac_start(mvm))
+ goto reschedule;
+
+ /* make sure the device is available for direct read/writes */
+ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) {
+ __iwl_mvm_mac_stop(mvm);
+ goto reschedule;
+ }
+
+ ret = iwl_mvm_get_temp(mvm, &temp);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
+
+ __iwl_mvm_mac_stop(mvm);
+
+ if (ret)
+ goto reschedule;
+
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+
+ if (temp <= tt->params.ct_kill_exit) {
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_exit_ctkill(mvm);
+ return;
+ }
+
+reschedule:
+ mutex_unlock(&mvm->mutex);
+ schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+ round_jiffies(duration * HZ));
+}
+
+static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ enum ieee80211_smps_mode smps_mode;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->thermal_throttle.dynamic_smps)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
+}
+
+static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
+{
+ struct iwl_mvm_sta *mvmsta;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
+ if (!mvmsta)
+ continue;
+
+ if (enable == mvmsta->tt_tx_protection)
+ continue;
+ err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
+ if (err) {
+ IWL_ERR(mvm, "Failed to %s Tx protection\n",
+ enable ? "enable" : "disable");
+ } else {
+ IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
+ enable ? "Enable" : "Disable");
+ mvmsta->tt_tx_protection = enable;
+ }
+ }
+}
+
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_THERMAL_MNG_BACKOFF,
+ .len = { sizeof(u32), },
+ .data = { &backoff, },
+ };
+
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
+ if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
+ IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
+ backoff);
+ mvm->thermal_throttle.tx_backoff = backoff;
+ } else {
+ IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
+ }
+}
+
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
+{
+ struct iwl_tt_params *params = &mvm->thermal_throttle.params;
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+ s32 temperature = mvm->temperature;
+ bool throttle_enable = false;
+ int i;
+ u32 tx_backoff;
+
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
+
+ if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
+ iwl_mvm_enter_ctkill(mvm);
+ return;
+ }
+
+ if (params->support_ct_kill &&
+ temperature <= params->ct_kill_exit) {
+ iwl_mvm_exit_ctkill(mvm);
+ return;
+ }
+
+ if (params->support_dynamic_smps) {
+ if (!tt->dynamic_smps &&
+ temperature >= params->dynamic_smps_entry) {
+ IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
+ tt->dynamic_smps = true;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ throttle_enable = true;
+ } else if (tt->dynamic_smps &&
+ temperature <= params->dynamic_smps_exit) {
+ IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
+ tt->dynamic_smps = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ }
+ }
+
+ if (params->support_tx_protection) {
+ if (temperature >= params->tx_protection_entry) {
+ iwl_mvm_tt_tx_protection(mvm, true);
+ throttle_enable = true;
+ } else if (temperature <= params->tx_protection_exit) {
+ iwl_mvm_tt_tx_protection(mvm, false);
+ }
+ }
+
+ if (params->support_tx_backoff) {
+ tx_backoff = tt->min_backoff;
+ for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+ if (temperature < params->tx_backoff[i].temperature)
+ break;
+ tx_backoff = max(tt->min_backoff,
+ params->tx_backoff[i].backoff);
+ }
+ if (tx_backoff != tt->min_backoff)
+ throttle_enable = true;
+ if (tt->tx_backoff != tx_backoff)
+ iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
+ }
+
+ if (!tt->throttle && throttle_enable) {
+ IWL_WARN(mvm,
+ "Due to high temperature thermal throttling initiated\n");
+ tt->throttle = true;
+ } else if (tt->throttle && !tt->dynamic_smps &&
+ tt->tx_backoff == tt->min_backoff &&
+ temperature <= params->tx_protection_exit) {
+ IWL_WARN(mvm,
+ "Temperature is back to normal thermal throttling stopped\n");
+ tt->throttle = false;
+ }
+}
+
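+/* Illustrative trace (an assumption based on the default parameters
+ * below, not taken from the original source): as the temperature climbs
+ * past 114 the handler enables dynamic SMPS and Tx protection, picks a
+ * Tx backoff from the table and sets tt->throttle; once it falls back to
+ * 108 or lower, SMPS is already off (exit at 110), the backoff is back
+ * at min_backoff, and throttling is declared over.
+ */
+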
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 200},
+ {.temperature = 113, .backoff = 600},
+ {.temperature = 114, .backoff = 1200},
+ {.temperature = 115, .backoff = 2000},
+ {.temperature = 116, .backoff = 4000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
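+/* Worked lookup example (illustrative only): with the tx_backoff table
+ * above, a reported temperature of 114 walks past the 112/113/114
+ * entries and settles on a backoff of 1200, while at 111 the first
+ * comparison breaks the loop and the backoff stays at tt->min_backoff.
+ */
+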
+/* budget in mWatt */
+static const u32 iwl_mvm_cdev_budgets[] = {
+ 2000, /* cooling state 0 */
+ 1800, /* cooling state 1 */
+ 1600, /* cooling state 2 */
+ 1400, /* cooling state 3 */
+ 1200, /* cooling state 4 */
+ 1000, /* cooling state 5 */
+ 900, /* cooling state 6 */
+ 800, /* cooling state 7 */
+ 700, /* cooling state 8 */
+ 650, /* cooling state 9 */
+ 600, /* cooling state 10 */
+ 550, /* cooling state 11 */
+ 500, /* cooling state 12 */
+ 450, /* cooling state 13 */
+ 400, /* cooling state 14 */
+ 350, /* cooling state 15 */
+ 300, /* cooling state 16 */
+ 250, /* cooling state 17 */
+ 200, /* cooling state 18 */
+ 150, /* cooling state 19 */
+};
+
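+/* Illustrative mapping (derived from the table above, not part of the
+ * original source): cooling state 5 translates into a cTDP budget of
+ * 1000 mW in iwl_mvm_ctdp_command() below; higher states always mean a
+ * lower budget, and the maximum state reported to the thermal core is
+ * ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1 = 19.
+ */
+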
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
+{
+ struct iwl_mvm_ctdp_cmd cmd = {
+ .operation = cpu_to_le32(op),
+ .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
+ .window_size = 0,
+ };
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+ CTDP_CONFIG_CMD),
+ sizeof(cmd), &cmd, &status);
+
+ if (ret) {
+ IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ switch (op) {
+ case CTDP_CMD_OPERATION_START:
+#ifdef CONFIG_THERMAL
+ mvm->cooling_dev.cur_state = state;
+#endif /* CONFIG_THERMAL */
+ break;
+ case CTDP_CMD_OPERATION_REPORT:
+ IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+	/* when the function is called with the CTDP_CMD_OPERATION_REPORT
+	 * option, it should return the average budget value that is
+	 * received from the FW.
+	 * The budget can't be less than or equal to 0, so it's possible
+	 * to distinguish between error values and budgets.
+ */
+ return status;
+ case CTDP_CMD_OPERATION_STOP:
+ IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n");
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_THERMAL
+static int compare_temps(const void *a, const void *b)
+{
+ return ((s16)le16_to_cpu(*(__le16 *)a) -
+ (s16)le16_to_cpu(*(__le16 *)b));
+}
+
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
+{
+ struct temp_report_ths_cmd cmd = {0};
+ int ret, i, j, idx = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->tz_device.tzone)
+ return -EINVAL;
+
+	/* The driver holds an array of temperature trips that is unsorted
+	 * and uncompressed; the FW should get it compressed and sorted
+	 */
+
+	/* compress temp_trips to cmd array, remove uninitialized values */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ if (mvm->tz_device.temp_trips[i] != S16_MIN) {
+ cmd.thresholds[idx++] =
+ cpu_to_le16(mvm->tz_device.temp_trips[i]);
+ }
+ }
+ cmd.num_temps = cpu_to_le32(idx);
+
+ if (!idx)
+ goto send;
+
+	/* sort cmd array */
+ sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
+
+	/* we should save the indexes of the trips because we sort
+	 * and compress the original array
+	 */
+ for (i = 0; i < idx; i++) {
+ for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
+ if (le16_to_cpu(cmd.thresholds[i]) ==
+ mvm->tz_device.temp_trips[j])
+ mvm->tz_device.fw_trips_index[i] = j;
+ }
+ }
+
+send:
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ TEMP_REPORTING_THRESHOLDS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+ ret);
+
+ return ret;
+}
+
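+/* Worked example (illustrative only): with temp_trips = {S16_MIN, 85,
+ * S16_MIN, 70, ...} the compression step above yields thresholds =
+ * {85, 70} and num_temps = 2; sorting gives {70, 85}, and the index
+ * mapping sets fw_trips_index = {3, 1}, so a firmware notification for
+ * threshold 0 is reported against trip 3.
+ */
+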
+static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
+ int *temperature)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+ int ret;
+ int temp;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = iwl_mvm_get_temp(mvm, &temp);
+ if (ret)
+ goto out;
+
+ *temperature = temp * 1000;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
+ int trip, int *temp)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+ return -EINVAL;
+
+ *temp = mvm->tz_device.temp_trips[trip] * 1000;
+
+ return 0;
+}
+
+static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+ return -EINVAL;
+
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
+ int trip, int temp)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+ struct iwl_mvm_thermal_device *tzone;
+ int i, ret;
+ s16 temperature;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((temp / 1000) > S16_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ temperature = (s16)(temp / 1000);
+ tzone = &mvm->tz_device;
+
+ if (!tzone) {
+ ret = -EIO;
+ goto out;
+ }
+
+	/* no updates */
+ if (tzone->temp_trips[trip] == temperature) {
+ ret = 0;
+ goto out;
+ }
+
+ /* already existing temperature */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ if (tzone->temp_trips[i] == temperature) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ tzone->temp_trips[trip] = temperature;
+
+ ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = iwl_mvm_tzone_get_temp,
+ .get_trip_temp = iwl_mvm_tzone_get_trip_temp,
+ .get_trip_type = iwl_mvm_tzone_get_trip_type,
+ .set_trip_temp = iwl_mvm_tzone_set_trip_temp,
+};
+
+/* make all trips writable */
+#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
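+/* e.g. assuming IWL_MAX_DTS_TRIPS is 8 (an assumption, defined in the
+ * firmware API headers), this evaluates to BIT(8) - 1 = 0xFF
+ */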
+
+static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+{
+ int i;
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ mvm->tz_device.tzone = NULL;
+
+ return;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ mvm->tz_device.tzone = thermal_zone_device_register(name,
+ IWL_MAX_DTS_TRIPS,
+ IWL_WRITABLE_TRIPS_MSK,
+ mvm, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(mvm->tz_device.tzone)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to thermal zone (err = %ld)\n",
+ PTR_ERR(mvm->tz_device.tzone));
+ mvm->tz_device.tzone = NULL;
+ return;
+ }
+
+ /* 0 is a valid temperature,
+	 * so initialize the array with S16_MIN, which is an invalid temperature
+ */
+ for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++)
+ mvm->tz_device.temp_trips[i] = S16_MIN;
+}
+
+static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+ *state = mvm->cooling_dev.cur_state;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
+
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops tcooling_ops = {
+ .get_max_state = iwl_mvm_tcool_get_max_state,
+ .get_cur_state = iwl_mvm_tcool_get_cur_state,
+ .set_cur_state = iwl_mvm_tcool_set_cur_state,
+};
+
+static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+{
+ char name[] = "iwlwifi";
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mvm->cooling_dev.cdev =
+ thermal_cooling_device_register(name,
+ mvm,
+ &tcooling_ops);
+
+ if (IS_ERR(mvm->cooling_dev.cdev)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to cooling device (err = %ld)\n",
+ PTR_ERR(mvm->cooling_dev.cdev));
+ mvm->cooling_dev.cdev = NULL;
+ return;
+ }
+}
+
+static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+ if (mvm->tz_device.tzone) {
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+ }
+}
+
+static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+ if (mvm->cooling_dev.cdev) {
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+ }
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
+{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+
+ IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
+
+ if (mvm->cfg->thermal_params)
+ tt->params = *mvm->cfg->thermal_params;
+ else
+ tt->params = iwl_mvm_default_tt_params;
+
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+ tt->min_backoff = min_backoff;
+ INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_register(mvm);
+ iwl_mvm_thermal_zone_register(mvm);
+#endif
+ mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
+
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
+ return;
+
+ cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
+ IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_unregister(mvm);
+ iwl_mvm_thermal_zone_unregister(mvm);
+#endif
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
new file mode 100644
index 000000000..449e3d328
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -0,0 +1,2006 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "iwl-trans.h"
+#include "iwl-eeprom-parse.h"
+#include "mvm.h"
+#include "sta.h"
+
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+ u16 tid, u16 ssn)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "BAR sent to %pM, tid %d, ssn %d",
+ addr, tid, ssn);
+}
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info,
+ u16 offload_assist)
+{
+#if IS_ENABLED(CONFIG_INET)
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u8 protocol = 0;
+
+ /*
+ * Do not compute checksum if already computed or if transport will
+ * compute it
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ goto out;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ /* if we get here - protocol now should be TCP/UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /*
+ * Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for A-MSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+ /*
+	 * mac header len should include the IV unless the IV is added
+	 * by the firmware, like in WEP; in the new Tx API the IV is
+	 * always added by the firmware. Size is in (16-bit) words.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
+ mh_len += info->control.hw_key->iv_len;
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+out:
+#endif
+ return offload_assist;
+}
+
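+/* Example composition (illustrative, not from the original source): for
+ * an unencrypted IPv4/TCP frame with a 26-byte QoS MAC header, the
+ * returned offload_assist has TX_CMD_OFFLD_L4_EN set, the IP-header
+ * offset field at 4 (16-bit words, i.e. the 8-byte SNAP header), and
+ * TX_CMD_OFFLD_MH_SIZE = 26 / 2 = 13; L3_EN is only added for IPv4
+ * A-MSDUs.
+ */
+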
+/*
+ * Sets most of the Tx cmd's fields
+ */
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+ u32 len = skb->len + FCS_LEN;
+ u16 offload_assist = 0;
+ u8 ac;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ tx_flags |= TX_CMD_FLG_ACK;
+ else
+ tx_flags &= ~TX_CMD_FLG_ACK;
+
+ if (ieee80211_is_probe_resp(fc))
+ tx_flags |= TX_CMD_FLG_TSF;
+
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+ iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+ ssn);
+ } else {
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ }
+
+	/* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
+ tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+ else if (ieee80211_is_action(fc))
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ else
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+
+ /* The spec allows Action frames in A-MPDU, we don't support
+ * it
+ */
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+ } else {
+ tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ }
+
+ if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+ ieee80211_action_contains_tpc(skb))
+ tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
+
+ tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+ /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
+ tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_cmd->sta_id = sta_id;
+
+ /* padding is inserted later in transport */
+ if (ieee80211_hdrlen(fc) % 4 &&
+ !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ tx_cmd->offload_assist |=
+ cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
+ offload_assist));
+}
+
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta)
+{
+ int rate_idx;
+ u8 rate_plcp;
+ u32 rate_flags;
+
+ /* HT rate doesn't make sense for a non data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+ "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx);
+
+ rate_idx = info->control.rates[0].idx;
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = rate_lowest_index(
+ &mvm->nvm_data->bands[info->band], sta);
+
+	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == NL80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+
+	/* For 2.4 GHz band, check that there is no need to remap */
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+ if (info->band == NL80211_BAND_2GHZ &&
+ !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+ rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+ else
+ rate_flags =
+ BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ return (u32)rate_plcp | rate_flags;
+}
+
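+/* Illustrative result (an assumption, not from the original source):
+ * for a 2.4 GHz management frame falling back to rate index 0 (1 Mbps),
+ * the returned value is the CCK PLCP rate with RATE_MCS_CCK_MSK set and
+ * a single antenna selected via RATE_MCS_ANT_POS.
+ */
+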
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ */
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc)) {
+ tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ tx_cmd->rts_retry_limit =
+ min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+ } else {
+ tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ }
+
+ /*
+ * for data packets, rate info comes from the table inside the fw. This
+ * table is controlled by LINK_QUALITY commands
+ */
+
+ if (ieee80211_is_data(fc) && sta) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd->tx_flags |=
+ cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
+ }
+
+ mvm->mgmt_last_antenna_idx =
+ iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+ mvm->mgmt_last_antenna_idx);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+}
+
+static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+ u8 *crypto_hdr)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u64 pn;
+
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+}
+
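+/* Illustrative layout (not part of the original code): for CCMP with
+ * pn = 0x0000AABBCCDDEEFF and keyidx = 0, the bytes above become
+ * crypto_hdr[0..1] = FF EE (PN0/PN1), crypto_hdr[2] = 0,
+ * crypto_hdr[3] = 0x20 (Ext IV), crypto_hdr[4..7] = DD CC BB AA
+ * (PN2..PN5), matching the IEEE 802.11 CCMP header layout.
+ */
+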
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int hdrlen)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u8 *crypto_hdr = skb_frag->data + hdrlen;
+ enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
+ u64 pn;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+ ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+ TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ type = TX_CMD_SEC_GCMP;
+ /* Fall through */
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ /* TODO: Taking the key from the table might introduce a race
+ * when PTK rekeying is done, having an old packets with a PN
+ * based on the old key but the message encrypted with a new
+ * one.
+ * Need to handle this.
+ */
+ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->key[0] = keyconf->hw_key_idx;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
+ default:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
+ }
+}
+
+/*
+ * Allocates and fills in the Tx cmd for the given skb
+ */
+static struct iwl_device_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info, int hdrlen,
+ struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_device_cmd *dev_cmd;
+ struct iwl_tx_cmd *tx_cmd;
+
+ dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+ if (unlikely(!dev_cmd))
+ return NULL;
+
+ /* Make sure we zero enough of dev_cmd */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
+
+ memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
+ dev_cmd->hdr.cmd = TX_CMD;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ u16 offload_assist = 0;
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ }
+
+ offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
+ offload_assist);
+
+ /* padding is inserted later in transport */
+ if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+ !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /* For data packets rate info comes from the fw */
+ if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ }
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le32(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ } else {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ }
+ goto out;
+ }
+
+ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+out:
+ return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+ struct iwl_device_cmd *cmd)
+{
+ struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
+ memset(&skb_info->status, 0, sizeof(skb_info->status));
+ memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
+
+ skb_info->driver_data[1] = cmd;
+}
+
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ struct iwl_mvm_vif *mvmvif;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * Non-bufferable frames use the broadcast station, thus they
+ * use the probe queue.
+ * Also take care of the case where we send a deauth to a
+ * station that we don't have, or similarly an association
+ * response (with non-success status) for a station we can't
+ * accept.
+		 * Also, disassociate frames might happen, particularly with
+ * reason 7 ("Class 3 frame received from nonassociated STA").
+ */
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(fc) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+ return mvm->probe_queue;
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return mvmvif->cab_queue;
+
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "fc=0x%02x", le16_to_cpu(fc));
+ return mvm->probe_queue;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
+ return mvm->p2p_dev_queue;
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return mvmvif->cab_queue;
+
+ WARN_ON_ONCE(1);
+ return mvm->p2p_dev_queue;
+ default:
+ WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+ return -1;
+ }
+}
+
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info info;
+ struct iwl_device_cmd *dev_cmd;
+ u8 sta_id;
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int queue;
+
+	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets, which can be sent
+	 * on 2 different types of vifs, P2P & STATION. P2P uses the offchannel
+	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
+	 * and hence its packets need to be sent on the aux queue
+ */
+ if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+ skb_info->hw_queue = mvm->aux_queue;
+
+ memcpy(&info, skb->cb, sizeof(info));
+
+ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+ return -1;
+
+ if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
+ return -1;
+
+ if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+ (!info.control.vif ||
+ info.hw_queue != info.control.vif->cab_queue)))
+ return -1;
+
+ queue = info.hw_queue;
+
+ /*
+ * If the interface on which the frame is sent is the P2P_DEVICE
+ * or an AP/GO interface use the broadcast station associated
+ * with it; otherwise if the interface is a managed interface
+ * use the AP station associated with it for multicast traffic
+	 * (this is not possible for unicast packets, as a TDLS discovery
+	 * response is sent without a station entry); otherwise use the
+ * AUX station.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ if (info.control.vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info.control.vif);
+
+ if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+ if (!ieee80211_is_data(hdr->frame_control))
+ sta_id = mvmvif->bcast_sta.sta_id;
+ else
+ sta_id = mvmvif->mcast_sta.sta_id;
+
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+ hdr->frame_control);
+ if (queue < 0)
+ return -1;
+ } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id != IWL_MVM_INVALID_STA)
+ sta_id = ap_sta_id;
+ } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+ queue = mvm->snif_queue;
+ sta_id = mvm->snif_sta.sta_id;
+ }
+ }
+
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ if (!dev_cmd)
+ return -1;
+
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_INET
+
+static int
+iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ netdev_features_t netdev_flags,
+ struct sk_buff_head *mpdus_skb)
+{
+ struct sk_buff *tmp, *next;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ char cb[sizeof(skb->cb)];
+ u16 i = 0;
+ unsigned int tcp_payload_len;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+ memcpy(cb, skb->cb, sizeof(cb));
+
+ next = skb_gso_segment(skb, netdev_flags);
+ skb_shinfo(skb)->gso_size = mss;
+ if (WARN_ON_ONCE(IS_ERR(next)))
+ return -EINVAL;
+ else if (next)
+ consume_skb(skb);
+
+ while (next) {
+ tmp = next;
+ next = tmp->next;
+
+ memcpy(tmp->cb, cb, sizeof(tmp->cb));
+ /*
+ * Compute the length of all the data added for the A-MSDU.
+ * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
+ * ETH header for n subframes.
+ */
+ tcp_payload_len = skb_tail_pointer(tmp) -
+ skb_transport_header(tmp) -
+ tcp_hdrlen(tmp) + tmp->data_len;
+
+ if (ipv4)
+ ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+ if (tcp_payload_len > mss) {
+ skb_shinfo(tmp)->gso_size = mss;
+ } else {
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc;
+
+ if (ipv4)
+ ip_send_check(ip_hdr(tmp));
+
+ qc = ieee80211_get_qos_ctl((void *)tmp->data);
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ skb_shinfo(tmp)->gso_size = 0;
+ }
+
+ tmp->prev = NULL;
+ tmp->next = NULL;
+
+ __skb_queue_tail(mpdus_skb, tmp);
+ i++;
+ }
+
+ return 0;
+}
+
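+/* Illustrative note (not in the original source): temporarily raising
+ * gso_size to num_subframes * mss before skb_gso_segment() makes the
+ * stack emit one skb per future A-MSDU; e.g. with mss = 1460 and
+ * num_subframes = 3 each segment carries up to 4380 bytes of TCP
+ * payload, which is then re-marked with gso_size = mss so the transport
+ * can split it into the individual subframes.
+ */
+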
+static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ unsigned int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+ u8 ac = tid_to_mac80211_ac[tid];
+ unsigned int txf;
+ int lmac = IWL_LMAC_24G_INDEX;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ band == NL80211_BAND_5GHZ)
+ lmac = IWL_LMAC_5G_INDEX;
+
+ /* For HE redirect to trigger based fifos */
+ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ ac += 4;
+
+ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+ return min_t(unsigned int, mvmsta->max_amsdu_len,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skb)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
+ u16 snap_ip_tcp, pad;
+ unsigned int dbg_max_amsdu_len;
+ netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+ u8 tid;
+
+ snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
+ tcp_hdrlen(skb);
+
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
+
+ if (!mvmsta->max_amsdu_len ||
+ !ieee80211_is_data_qos(hdr->frame_control) ||
+ (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ /*
+	 * Do not build an A-MSDU for IPv6 with extension headers;
+	 * ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ netdev_flags &= ~NETIF_F_CSUM_MASK;
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ }
+
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ /*
+ * No need to lock amsdu_in_ampdu_allowed since it can't be modified
+	 * during a BA session.
+ */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
+ !(mvmsta->amsdu_enabled & BIT(tid)))
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
+
+ if (unlikely(dbg_max_amsdu_len))
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+ dbg_max_amsdu_len);
+
+ /*
+ * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+ * supported. This is a spec requirement (IEEE 802.11-2015
+ * section 8.7.3 NOTE 3).
+ */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !sta->vht_cap.vht_supported)
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
+
+ /* Sub frame header + SNAP + IP header + TCP header + MSS */
+ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+ pad = (4 - subf_len) & 0x3;
+
+ /*
+ * If we have N subframes in the A-MSDU, then the A-MSDU's size is
+ * N * subf_len + (N - 1) * pad.
+ */
+ num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
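+	/* Worked example (illustrative only): for IPv4/TCP without options,
+	 * snap_ip_tcp = 8 + 20 + 20 = 48, so with mss = 1460 we get
+	 * subf_len = 14 + 48 + 1460 = 1522 and pad = 2; a max_amsdu_len of
+	 * 7935 then yields (7935 + 2) / 1524 = 5 subframes.
+	 */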
+
+ if (sta->max_amsdu_subframes &&
+ num_subframes > sta->max_amsdu_subframes)
+ num_subframes = sta->max_amsdu_subframes;
+
+ tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ /*
+ * Make sure we have enough TBs for the A-MSDU:
+ * 2 for each subframe
+ * 1 more for each fragment
+ * 1 more for the potential data in the header
+ */
+ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+ mvm->trans->max_skb_frags)
+ num_subframes = 1;
+
+ if (num_subframes > 1)
+ *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* This skb fits in one single A-MSDU */
+ if (num_subframes * mss >= tcp_payload_len) {
+ __skb_queue_tail(mpdus_skb, skb);
+ return 0;
+ }
+
+ /*
+ * Trick the segmentation function to make it
+ * create SKBs that can fit into one A-MSDU.
+ */
+ return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
+ mpdus_skb);
+}
+#else /* CONFIG_INET */
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skb)
+{
+	/* Impossible to get TSO without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif
+
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta, u8 tid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 mac_queue = info->hw_queue;
+ struct sk_buff_head *deferred_tx_frames;
+
+ lockdep_assert_held(&mvm_sta->lock);
+
+ mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+ set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+ deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+ skb_queue_tail(deferred_tx_frames, skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC queues, so we
+ * should never get a second deferred frame for the RA/TID.
+ * In case of GSO the first packet may have been split, so don't warn.
+ */
+ if (skb_queue_len(deferred_tx_frames) == 1) {
+ iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+ schedule_work(&mvm->add_stream_wk);
+ }
+}
+
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+ unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+ unsigned long now = jiffies;
+ int tid;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ return true;
+ }
+
+ return false;
+}
+
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ int airtime)
+{
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ if (mvm->tcm.paused)
+ return;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ mdata->tx.airtime += airtime;
+}
+
+static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
+{
+ u32 ac = tid_to_mac80211_ac[tid];
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ mdata->tx.pkts[ac]++;
+}
+
+/*
+ * Prepare the Tx command for a single MPDU and hand it to the transport,
+ * taking care of sequence numbering and TXQ selection.
+ */
+static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_device_cmd *dev_cmd;
+ __le16 fc;
+ u16 seq_number = 0;
+ u8 tid = IWL_MAX_TID_COUNT;
+ u16 txq_id = info->hw_queue;
+ bool is_ampdu = false;
+ int hdrlen;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+ sta, mvmsta->sta_id);
+ if (!dev_cmd)
+ goto drop;
+
+ /*
+ * we handle that entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
+ spin_lock(&mvmsta->lock);
+
+ /* nullfunc frames should go to the MGMT queue regardless of QoS;
+ * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
+ * MGMT TID assignment for them
+ */
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+
+ is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ if (WARN_ON_ONCE(is_ampdu &&
+ mvmsta->tid_data[tid].state != IWL_AGG_ON))
+ goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ /* update the tx_cmd hdr as it was already copied */
+ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+ }
+ }
+
+ txq_id = mvmsta->tid_data[tid].txq_id;
+
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+ /* Check if TXQ needs to be allocated or re-activated */
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
+ !mvmsta->tid_data[tid].is_tid_active)) {
+ /* If TXQ needs to be allocated... */
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+ /*
+ * The frame is now deferred, and the worker scheduled
+ * will re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
+ }
+
+ /* queue should always be active in new TX path */
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ /* If we are here - TXQ exists and needs to be re-activated */
+ spin_lock(&mvm->queue_info_lock);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ spin_unlock(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
+ txq_id);
+ }
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+ /*
+ * If we have timed-out TIDs, schedule the worker that will
+ * reconfigure the queues and update them.
+ *
+ * Note that the mvm->queue_info_lock isn't being taken here in
+ * order to not serialize the TX flow. This isn't dangerous
+ * because scheduling mvm->add_stream_wk can't ruin the state,
+ * and if we DON'T schedule it due to some race condition, the
+ * next TX that gets here will.
+ */
+ if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED &&
+ iwl_mvm_txq_should_update(mvm, txq_id)))
+ schedule_work(&mvm->add_stream_wk);
+ }
+
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
+ tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
+
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+ goto drop_unlock_sta;
+
+ if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
+ mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
+
+ spin_unlock(&mvmsta->lock);
+
+ iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+
+ return 0;
+
+drop_unlock_sta:
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+drop:
+ return -1;
+}
+
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_tx_info info;
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+
+ memcpy(&info, skb->cb, sizeof(info));
+
+ if (!skb_is_gso(skb))
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+
+ payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ if (payload_len <= skb_shinfo(skb)->gso_size)
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+
+ __skb_queue_head_init(&mpdus_skbs);
+
+ ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
+ return ret;
+
+ while (!skb_queue_empty(&mpdus_skbs)) {
+ skb = __skb_dequeue(&mpdus_skbs);
+
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ if (ret) {
+ __skb_queue_purge(&mpdus_skbs);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct ieee80211_vif *vif = mvmsta->vif;
+ u16 normalized_ssn;
+
+ lockdep_assert_held(&mvmsta->lock);
+
+ if ((tid_data->state == IWL_AGG_ON ||
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ iwl_mvm_tid_queued(mvm, tid_data) == 0) {
+ /*
+ * Now that this aggregation or DQA queue is empty, tell
+ * mac80211 so it knows we no longer have frames buffered for
+ * the station on this TID (for the TIM bitmap calculation).
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
+ /*
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->cfg->gen2)
+ normalized_ssn &= 0xff;
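+ /*
+ * Illustration (editor's note): with an 8-bit next_reclaimed of 0x42,
+ * an ssn of 0x142 is masked down to 0x42 and the two compare equal,
+ * as intended across the wrap-around.
+ */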
+
+ if (normalized_ssn != tid_data->next_reclaimed)
+ return;
+
+ switch (tid_data->state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue addBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue DELBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ tid_data->state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(SMALL_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r)
+{
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ r->flags |= IEEE80211_TX_RC_MCS;
+ r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ ieee80211_rate_set_vht(
+ r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1);
+ r->flags |= IEEE80211_TX_RC_VHT_MCS;
+ } else {
+ r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ band);
+ }
+}
+
+/*
+ * Translate the ucode rate response to mac80211 tx status rate values.
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+ info->status.antenna =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+ u32 status)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_tx_status *status_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
+ status_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+ /* don't collect on status 0 */
+ if (!status_trig->statuses[i].status)
+ break;
+
+ if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Tx status %d was received",
+ status & TX_STATUS_MSK);
+ break;
+ }
+}
+
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @mvm: the mvm object
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count) & 0xfff;
+}
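+/*
+ * Layout sketch (editor's illustration): each agg_tx_status entry is one
+ * 32-bit word (16-bit status + 16-bit sequence), so with frame_count
+ * entries the SCD SSN word sits immediately after them:
+ *
+ *   [status 0][status 1]...[status frame_count-1][SCD SSN word]
+ *
+ * which is what the (__le32 *) + frame_count arithmetic above skips over;
+ * the & 0xfff keeps only the 12-bit sequence number.
+ */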
+
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_sta *sta;
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ /* struct iwl_mvm_tx_resp_v3 is almost the same */
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ struct agg_tx_status *agg_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ u32 status = le16_to_cpu(agg_status->status);
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
+ struct iwl_mvm_sta *mvmsta;
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ u8 lq_color;
+ u16 next_reclaimed, seq_ctl;
+ bool is_ndp = false;
+
+ __skb_queue_head_init(&skbs);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ txq_id = le16_to_cpu(tx_resp->tx_queue);
+
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free frames up to ssn % q.n_bd, not inclusive */
+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool flushed = false;
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ case TX_STATUS_FAIL_FIFO_FLUSHED:
+ case TX_STATUS_FAIL_DRAIN_FLOW:
+ flushed = true;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+ /* the FW should have stopped the queue and not
+ * returned this status
+ */
+ WARN_ON(1);
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If we are freeing multiple frames, mark all the frames
+ * but the first one as acked, since they were acknowledged
+ * before.
+ */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ iwl_mvm_tx_status_check_trigger(mvm, status);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+ info);
+ info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
+
+ /* Single frame failure in an AMPDU queue => send BAR */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
+ if (ieee80211_is_back_req(hdr->frame_control))
+ seq_ctl = 0;
+ else if (status != TX_STATUS_SUCCESS)
+ seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+
+ if (unlikely(!seq_ctl)) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ /*
+ * If it is an NDP, we can't update next_reclaimed since
+ * its sequence control is 0. Note that for that same
+ * reason, NDPs are never sent to A-MPDU'able queues
+ * so that we can never have more than one freed frame
+ * for a single Tx response (see WARN_ON below).
+ */
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ is_ndp = true;
+ }
+
+ /*
+ * TODO: this is not accurate if we are freeing more than one
+ * packet.
+ */
+ info->status.tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ info->status.status_driver_data[0] =
+ RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
+
+ ieee80211_tx_status(mvm->hw, skb);
+ }
+
+ /* This is an aggregation queue or might become one, so we use
+ * the ssn since: ssn = wifi seq_num % 256.
+ * The seq_ctl is the sequence control of the packet to which
+ * this Tx response relates. But if there is a hole in the
+ * bitmap of the BA we received, this Tx response may allow us
+ * to reclaim the hole and all the subsequent packets that were
+ * already acked. In that case, seq_ctl != ssn, and the next
+ * packet to be reclaimed will be ssn and not seq_ctl. In that
+ * case, several packets will be reclaimed even if
+ * frame_count = 1.
+ *
+ * The ssn is the index (% 256) of the latest packet that has
+ * been treated (acked / dropped) + 1.
+ */
+ next_reclaimed = ssn;
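+ /*
+ * Editor's example of the hole case described above: if frames
+ * 10..15 were sent, 10 initially failed and 11..15 were acked via a
+ * BA, then the Tx response for the retry of frame 10 has
+ * seq_ctl == 10 but ssn == 16, so this single response reclaims
+ * frames 10..15 even though frame_count = 1.
+ */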
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TXQ %d status %s (0x%08x)\n",
+ txq_id, iwl_mvm_get_tx_fail_reason(status), status);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+ le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+ ssn, next_reclaimed, seq_ctl);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL, otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
+
+ if (!IS_ERR(sta)) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+
+ if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
+ struct iwl_mvm_tid_data *tid_data =
+ &mvmsta->tid_data[tid];
+ bool send_eosp_ndp = false;
+
+ spin_lock_bh(&mvmsta->lock);
+
+ if (!is_ndp) {
+ tid_data->next_reclaimed = next_reclaimed;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ } else {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "NDP - don't update next_reclaimed\n");
+ }
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ if (mvmsta->sleep_tx_count) {
+ mvmsta->sleep_tx_count--;
+ if (mvmsta->sleep_tx_count &&
+ !iwl_mvm_tid_queued(mvm, tid_data)) {
+ /*
+ * The number of frames in the queue
+ * dropped to 0 even if we sent less
+ * frames than we thought we had on the
+ * Tx queue.
+ * This means we had holes in the BA
+ * window that we just filled, ask
+ * mac80211 to send EOSP since the
+ * firmware won't know how to do that.
+ * Send NDP and the firmware will send
+ * EOSP notification that will trigger
+ * a call to ieee80211_sta_eosp().
+ */
+ send_eosp_ndp = true;
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+ if (send_eosp_ndp) {
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+ IEEE80211_FRAME_RELEASE_UAPSD,
+ 1, tid, false, false);
+ mvmsta->sleep_tx_count = 0;
+ ieee80211_send_eosp_nullfunc(sta, tid);
+ }
+ }
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
+ } else {
+ mvmsta = NULL;
+ }
+
+out:
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+ switch (status & AGG_TX_STATE_STATUS_MSK) {
+ AGG_TX_STATE_(TRANSMITTED);
+ AGG_TX_STATE_(UNDERRUN);
+ AGG_TX_STATE_(BT_PRIO);
+ AGG_TX_STATE_(FEW_BYTES);
+ AGG_TX_STATE_(ABORT);
+ AGG_TX_STATE_(TX_ON_AIR_DROP);
+ AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+ AGG_TX_STATE_(LAST_SENT_BT_KILL);
+ AGG_TX_STATE_(SCD_QUERY);
+ AGG_TX_STATE_(TEST_BAD_CRC32);
+ AGG_TX_STATE_(RESPONSE);
+ AGG_TX_STATE_(DUMP_TX);
+ AGG_TX_STATE_(DELAY_TX);
+ }
+
+ return "UNKNOWN";
+}
+
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct agg_tx_status *frame_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ int i;
+
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+ iwl_get_agg_tx_status(fstatus),
+ fstatus & AGG_TX_STATE_STATUS_MSK,
+ (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+ AGG_TX_STATE_TRY_CNT_POS,
+ le16_to_cpu(frame_status[i].sequence));
+ }
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ struct iwl_mvm_sta *mvmsta;
+ int queue = SEQ_TO_QUEUE(sequence);
+
+ if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
+ return;
+
+ if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
+ return;
+
+ iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+
+ if (!WARN_ON_ONCE(!mvmsta)) {
+ mvmsta->tid_data[tid].rate_n_flags =
+ le32_to_cpu(tx_resp->initial_rate);
+ mvmsta->tid_data[tid].tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
+ mvmsta->tid_data[tid].lq_color =
+ TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+ }
+
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+
+ if (tx_resp->frame_count == 1)
+ iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+ else
+ iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+}
+
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ int txq, int index,
+ struct ieee80211_tx_info *ba_info, u32 rate)
+{
+ struct sk_buff_head reclaimed_skbs;
+ struct iwl_mvm_tid_data *tid_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct sk_buff *skb;
+ int freed;
+
+ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+ tid > IWL_MAX_TID_COUNT,
+ "sta_id %d tid %d", sta_id, tid))
+ return;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* Reclaiming frames for a station that has been deleted? */
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ tid_data = &mvmsta->tid_data[tid];
+
+ if (tid_data->txq_id != txq) {
+ IWL_ERR(mvm,
+ "invalid BA notification: Q %d, tid %d\n",
+ tid_data->txq_id, tid);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+
+ tid_data->next_reclaimed = index;
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ freed = 0;
+
+ /* pack lq color from tid_data along the reduced txp */
+ ba_info->status.status_driver_data[0] =
+ RS_DRV_DATA_PACK(tid_data->lq_color,
+ ba_info->status.status_driver_data[0]);
+ ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ if (freed == 1) {
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ memcpy(&info->status, &ba_info->status,
+ sizeof(ba_info->status));
+ iwl_mvm_hwrate_to_tx_status(rate, info);
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ /* We got a BA notif with 0 acked, or scd_ssn didn't progress, which is
+ * possible (i.e. the first MPDU in the aggregation wasn't acked).
+ * Still, it's important to update RS about sent vs. acked.
+ */
+ if (skb_queue_empty(&reclaimed_skbs)) {
+ struct ieee80211_chanctx_conf *chanctx_conf = NULL;
+
+ if (mvmsta->vif)
+ chanctx_conf =
+ rcu_dereference(mvmsta->vif->chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx_conf))
+ goto out;
+
+ ba_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, ba_info);
+
+ if (!iwl_mvm_has_tlc_offload(mvm)) {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "No reclaim. Update rs directly\n");
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ }
+ }
+
+out:
+ rcu_read_unlock();
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status(mvm->hw, skb);
+ }
+}
+
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int sta_id, tid, txq, index;
+ struct ieee80211_tx_info ba_info = {};
+ struct iwl_mvm_ba_notif *ba_notif;
+ struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_sta *mvmsta;
+
+ ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_mvm_compressed_ba_notif *ba_res =
+ (void *)pkt->data;
+ u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+ int i;
+
+ sta_id = ba_res->sta_id;
+ ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+ ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+ ba_info.status.tx_time =
+ (u16)le32_to_cpu(ba_res->wireless_time);
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_res->reduced_txp;
+
+ if (!le16_to_cpu(ba_res->tfd_cnt))
+ goto out;
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (!mvmsta)
+ goto out_unlock;
+
+ /* Free per TID */
+ for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+ struct iwl_mvm_compressed_ba_tfd *ba_tfd =
+ &ba_res->tfd[i];
+
+ tid = ba_tfd->tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
+ /* store the LQ color for the TID this TFD belongs to */
+ mvmsta->tid_data[tid].lq_color = lq_color;
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+ (int)(le16_to_cpu(ba_tfd->q_num)),
+ le16_to_cpu(ba_tfd->tfd_index),
+ &ba_info,
+ le32_to_cpu(ba_res->tx_rate));
+ }
+
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
+out_unlock:
+ rcu_read_unlock();
+out:
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+ return;
+ }
+
+ ba_notif = (void *)pkt->data;
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+ /* "flow" corresponds to Tx queue */
+ txq = le16_to_cpu(ba_notif->scd_flow);
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ index = le16_to_cpu(ba_notif->scd_ssn);
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (WARN_ON_ONCE(!mvmsta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+ ba_info.status.ampdu_len = ba_notif->txed;
+ ba_info.status.tx_time = tid_data->tx_time;
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_notif->reduced_txp;
+
+ rcu_read_unlock();
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ ba_notif->sta_addr, ba_notif->sta_id);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ le64_to_cpu(ba_notif->bitmap), txq, index,
+ ba_notif->txed, ba_notif->txed_2_done);
+
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
+}
+
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
+ .queues_ctl = cpu_to_le32(tfd_msk),
+ .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+ };
+
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
+
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+ u16 tids, u32 flags)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ .tid_mask = cpu_to_le16(tids),
+ };
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
+
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+{
+ struct iwl_mvm_int_sta *int_sta = sta;
+ struct iwl_mvm_sta *mvm_sta = sta;
+
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
+ offsetof(struct iwl_mvm_sta, sta_id));
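+ /*
+ * Editor's note: sta may point to either struct here; the check
+ * above guarantees that reading ->sta_id through either type is safe.
+ */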
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
+ 0xff | BIT(IWL_MGMT_TID), flags);
+
+ if (internal)
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
+ flags);
+
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
new file mode 100644
index 000000000..bc3f67e0b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -0,0 +1,1878 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "fw/api/rs.h"
+
+/*
+ * Returns 0 even if the cmd failed while RFKILL was asserted, unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex; this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (!(cmd->flags & CMD_ASYNC)) {
+ lockdep_assert_held(&mvm->mutex);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
+ }
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+ if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+
+ /*
+ * If the caller wants the SKB, then don't hide any problems, the
+ * caller might access the response buffer which will be NULL if
+ * the command failed.
+ */
+ if (cmd->flags & CMD_WANT_SKB)
+ return ret;
+
+ /* Silently ignore failures if RFKILL is asserted */
+ if (!ret || ret == -ERFKILL)
+ return 0;
+ return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
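+/*
+ * Typical usage (editor's note), as in iwl_mvm_flush_tx_path() in tx.c:
+ *
+ *	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+ *				   sizeof(flush_cmd), &flush_cmd);
+ *
+ * i.e. a single-fragment host command built from a stack-allocated
+ * command struct.
+ */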
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+ u32 *status)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ int ret, resp_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
+ /*
+ * Only synchronous commands can wait for status,
+ * we use WANT_SKB so the caller can't.
+ */
+ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+ "cmd flags %x", cmd->flags))
+ return -EINVAL;
+
+ cmd->flags |= CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (ret == -ERFKILL) {
+ /*
+ * The command failed because of RFKILL, don't update
+ * the status, leave it as success and return 0.
+ */
+ return 0;
+ } else if (ret) {
+ return ret;
+ }
+
+ pkt = cmd->resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+ out_free_resp:
+ iwl_free_resp(cmd);
+ return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
+ const void *data, u32 *status)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
+
+#define IWL_DECLARE_RATE_INFO(r) \
+ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1),
+ IWL_DECLARE_RATE_INFO(2),
+ IWL_DECLARE_RATE_INFO(5),
+ IWL_DECLARE_RATE_INFO(11),
+ IWL_DECLARE_RATE_INFO(6),
+ IWL_DECLARE_RATE_INFO(9),
+ IWL_DECLARE_RATE_INFO(12),
+ IWL_DECLARE_RATE_INFO(18),
+ IWL_DECLARE_RATE_INFO(24),
+ IWL_DECLARE_RATE_INFO(36),
+ IWL_DECLARE_RATE_INFO(48),
+ IWL_DECLARE_RATE_INFO(54),
+};
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ int idx;
+ int band_offset = 0;
+
+ /* Legacy rate format, search for match in table */
+ if (band == NL80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
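+ /*
+ * Editor's note: CCK rates (1/2/5.5/11 Mbps) don't exist on 5 GHz, so
+ * the search starts at the first OFDM entry there and the returned
+ * index is rebased so that 6 Mbps maps to index 0.
+ */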
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (fw_rate_idx_to_plcp[idx] == rate)
+ return idx - band_offset;
+
+ return -1;
+}
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+{
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ return fw_rate_idx_to_plcp[rate_idx];
+}
+
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+ IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+ le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+ IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_service));
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+ le64_to_cpu(err_resp->timestamp));
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
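+ * E.g. first_antenna(ANT_B | ANT_C) returns ANT_B (editor's example).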
+ */
+u8 first_antenna(u8 mask)
+{
+ BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+ if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
+ return BIT(0);
+ return BIT(ffs(mask) - 1);
+}
+
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, the caller must use BIT(idx).
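+ * E.g. valid = ANT_A | ANT_B with last_idx = 0 returns 1 (editor's example).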
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+ u8 ind = last_idx;
+ int i;
+
+ for (i = 0; i < MAX_ANT_NUM; i++) {
+ ind = (ind + 1) % MAX_ANT_NUM;
+ if (valid & BIT(ind))
+ return ind;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+ return last_idx;
+}
+
+static const struct {
+ const char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == num)
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table_v1 {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 gp3; /* GP3 timer register */
+ u32 ucode_ver; /* uCode version */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicates which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicates the date and time of the
+ * compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 trm_hw_status0; /* TRM HW status */
+ u32 trm_hw_status1; /* TRM HW status */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 fw_rev_type; /* firmware revision type */
+ u32 major; /* uCode version major */
+ u32 minor; /* uCode version minor */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 last_cmd_id; /* last HCMD id handled by the firmware */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicates which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicates the date and time of the
+ * compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 umac_major;
+ u32 umac_minor;
+ u32 frame_pointer; /* core register 27 */
+ u32 stack_pointer; /* core register 28 */
+ u32 cmd_header; /* latest host cmd sent to UMAC */
+ u32 nic_isr_pref; /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_umac_error_event_table table;
+
+ if (!mvm->support_umac_log)
+ return;
+
+ iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
+ sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
+ IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
+ IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
+ IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
+ IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
+ IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
+ IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_error_event_table table;
+ u32 val;
+
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
+ if (!base)
+ base = mvm->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = mvm->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x400000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ /* check if there is a HW error */
+ val = iwl_trans_read_mem32(trans, base);
+ if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+ int err;
+
+ IWL_ERR(trans, "HW error, resetting before reading\n");
+
+ /* reset the device */
+ iwl_trans_sw_reset(trans);
+
+ /* set INIT_DONE flag */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
+
+ /* and wait for clock stabilization */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ err = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO(trans,
+ "Failed to reset the card for the dump\n");
+ return;
+ }
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ /* Do not change this output - scripts rely on it */
+
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+ trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+ IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+ IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+ IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
+ IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+ IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+ IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
+ IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ IWL_ERR(mvm,
+ "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+ return;
+ }
+
+ iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+
+ if (mvm->error_event_table[1])
+ iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
+
+ iwl_mvm_dump_umac_error_log(mvm);
+}
+
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
+ /* Start by looking for a free queue */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+ return i;
+
+ /*
+ * If no free queue was found, settle for an inactive one to reconfigure.
+ * Make sure that the inactive queue either already belongs to this STA,
+ * or, if it belongs to another one, that it isn't the reserved queue
+ */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
+ (sta_id == mvm->queue_info[i].ra_sta_id ||
+ !mvm->queue_info[i].reserved))
+ return i;
+
+ return -ENOSPC;
+}
+
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ "Trying to reconfig unallocated queue %d\n", queue)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -ENXIO;
+ }
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
+{
+ bool enable_queue = true;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure this TID isn't already enabled */
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, tid);
+ return false;
+ }
+
+ /* Update mappings and refcounts */
+ if (mvm->queue_info[queue].hw_queue_refcount > 0)
+ enable_queue = false;
+
+ if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+ WARN(mac80211_queue >=
+ BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+ "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+ mac80211_queue, queue, sta_id, tid);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ }
+
+ mvm->queue_info[queue].hw_queue_refcount++;
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
+
+ if (enable_queue) {
+ if (tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = tid;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+ queue, mvm->queue_info[queue].hw_queue_refcount,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return enable_queue;
+}
+
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+ if (cmd.tid == IWL_MAX_TID_COUNT) {
+ cmd.tid = IWL_MGMT_TID;
+ size = IWL_MGMT_QUEUE_SIZE;
+ }
+ queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+ SCD_QUEUE_CFG, size, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Send the enabling command if we need to */
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
+}
+
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u8 tid, u8 flags)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+ int ret;
+
+ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+ return -EINVAL;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ /*
+ * If there is another TID with the same AC - don't remove the MAC queue
+ * from the mapping
+ */
+ if (tid < IWL_MAX_TID_COUNT) {
+ unsigned long tid_bitmap =
+ mvm->queue_info[queue].tid_bitmap;
+ int ac = tid_to_mac80211_ac[tid];
+ int i;
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+ if (tid_to_mac80211_ac[i] == ac)
+ remove_mac_queue = false;
+ }
+ }
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+ mvm->queue_info[queue].hw_queue_refcount--;
+
+ cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+ queue,
+ mvm->queue_info[queue].hw_queue_refcount,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ /* If the queue is still enabled - nothing left to do in this func */
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+
+ /* Make sure queue info is correct even though we overwrite it */
+ WARN(mvm->queue_info[queue].hw_queue_refcount ||
+ mvm->queue_info[queue].tid_bitmap ||
+ mvm->hw_queue_to_mac80211[queue],
+ "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+ queue, mvm->queue_info[queue].hw_queue_refcount,
+ mvm->hw_queue_to_mac80211[queue],
+ mvm->queue_info[queue].tid_bitmap);
+
+ /* If we are here - the queue is freed and we can zero out these vals */
+ mvm->queue_info[queue].hw_queue_refcount = 0;
+ mvm->queue_info[queue].tid_bitmap = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
+
+	/* Regardless of whether this was a reserved TXQ for a STA - clear it */
+ mvm->queue_info[queue].reserved = false;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+}
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @mvm: Driver data.
+ * @lq: Link quality command to send.
+ * @sync: Send the command synchronously if true, asynchronously otherwise.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In that special case it is sent synchronously, and a callback clears
+ * the state indicating that station creation is still in progress.
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
+{
+ struct iwl_host_cmd cmd = {
+ .id = LQ_CMD,
+ .len = { sizeof(struct iwl_lq_cmd), },
+ .flags = sync ? 0 : CMD_ASYNC,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+ iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/**
+ * iwl_mvm_update_smps - Handle a request to change the SMPS mode
+ * @mvm: Driver data.
+ * @vif: The interface the request applies to.
+ * @req_type: The part of the driver that requested the change.
+ * @smps_request: The requested SMPS mode.
+ *
+ * Record the request and recompute the effective SMPS mode from all
+ * outstanding requests in the driver.
+ */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request)
+{
+ struct iwl_mvm_vif *mvmvif;
+ enum ieee80211_smps_mode smps_mode;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvmvif->smps_requests[req_type] = smps_request;
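+	/* a single STATIC request wins; otherwise any DYNAMIC request overrides the default */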
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
+ smps_mode = IEEE80211_SMPS_STATIC;
+ break;
+ }
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ }
+
+ ieee80211_request_smps(vif, smps_mode);
+}
+
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+ struct iwl_statistics_cmd scmd = {
+ .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+ };
+ struct iwl_host_cmd cmd = {
+ .id = STATISTICS_CMD,
+ .len[0] = sizeof(scmd),
+ .data[0] = &scmd,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
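+	/* CMD_WANT_SKB: the FW response packet carries the statistics to parse */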
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+ iwl_free_resp(&cmd);
+
+ if (clear)
+ iwl_mvm_accu_radio_stats(mvm);
+
+ return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+ mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+ mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+ mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+ mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *result = _data;
+ int i;
+
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+ mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+ *result = false;
+ }
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+{
+ bool result = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
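+	/* RX diversity is only allowed in CAM (always-on) power scheme */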
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ return false;
+
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+ return false;
+
+ if (mvm->cfg->rx_with_siso_diversity)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_diversity_iter, &result);
+
+ return result;
+}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mvm_low_latency_cause cause)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+ bool prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ prev = iwl_mvm_vif_low_latency(mvmvif);
+ iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
+
+ low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+ if (low_latency == prev)
+ return 0;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mvmvif->id)
+ };
+
+ if (low_latency) {
+ /* currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+ res = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(LOW_LATENCY_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (res)
+ IWL_ERR(mvm, "Failed to send low latency command\n");
+ }
+
+ res = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm);
+}
+
+struct iwl_mvm_low_latency_iter {
+ bool result;
+ bool result_per_band[NUM_NL80211_BANDS];
+};
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_low_latency_iter *result = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum nl80211_band band;
+
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ result->result = true;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ band = mvmvif->phy_ctxt->channel->band;
+ result->result_per_band[band] = true;
+ }
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_low_latency_iter data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &data);
+
+ return data.result;
+}
+
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ struct iwl_mvm_low_latency_iter data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &data);
+
+ return data.result_per_band[band];
+}
+
+struct iwl_bss_iter_data {
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
+static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (data->vif) {
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
+{
+ struct iwl_bss_iter_data bss_iter_data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_iface_iterator, &bss_iter_data);
+
+ if (bss_iter_data.error) {
+ IWL_ERR(mvm, "More than one managed interface active!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return bss_iter_data.vif;
+}
+
+struct iwl_sta_iter_data {
+ bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_sta_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->bss_conf.assoc)
+ data->assoc = true;
+}
+
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+ struct iwl_sta_iter_data data = {
+ .assoc = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_sta_iface_iterator,
+ &data);
+ return data.assoc;
+}
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool tdls, bool cmd_q)
+{
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+ struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
+ unsigned int default_timeout =
+ cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
+ /*
+ * We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+ vif && vif->type == NL80211_IFTYPE_AP)
+ return IWL_WATCHDOG_DISABLED;
+ return iwlmvm_mod_params.tfd_q_hang_detect ?
+ default_timeout : IWL_WATCHDOG_DISABLED;
+ }
+
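+	/* a TXQ-timers debug trigger supplies per-interface-type timeouts */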
+ trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
+ txq_timer = (void *)trigger->data;
+
+ if (tdls)
+ return le32_to_cpu(txq_timer->tdls);
+
+ if (cmd_q)
+ return le32_to_cpu(txq_timer->command_queue);
+
+ if (WARN_ON(!vif))
+ return default_timeout;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ return le32_to_cpu(txq_timer->ibss);
+ case NL80211_IFTYPE_STATION:
+ return le32_to_cpu(txq_timer->bss);
+ case NL80211_IFTYPE_AP:
+ return le32_to_cpu(txq_timer->softap);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return le32_to_cpu(txq_timer->p2p_client);
+ case NL80211_IFTYPE_P2P_GO:
+ return le32_to_cpu(txq_timer->p2p_go);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return le32_to_cpu(txq_timer->p2p_device);
+ case NL80211_IFTYPE_MONITOR:
+ return default_timeout;
+ default:
+ WARN_ON(1);
+ return mvm->cfg->base_params->wd_timeout;
+ }
+}
+
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
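+	/* collect FW debug data if an MLME trigger matches, then report the loss */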
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ goto out;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+ trig_mlme = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ goto out;
+
+ if (trig_mlme->stop_connection_loss &&
+ --trig_mlme->stop_connection_loss)
+ goto out;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
+
+out:
+ ieee80211_connection_loss(vif);
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all of the queue's TIDs are inactive - mark the queue as inactive.
+ * If only some of the queue's TIDs are inactive - unmap them from the queue.
+ */
+static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - mark queue as inactive. */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
+ mvmsta->tid_data[tid].is_tid_active = false;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
+ queue);
+ return;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed-out.
+ * Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+ mvm->queue_info[queue].hw_queue_refcount--;
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+ mvmsta->tid_data[tid].is_tid_active = false;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+	/*
+	 * Different TIDs may map to the same mac queue, so make sure the
+	 * mac queues of all remaining TIDs stay enabled
+	 */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ mvm->hw_queue_to_mac80211[queue] |=
+ BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+ }
+
+ /* If the queue is marked as shared - "unshare" it */
+ if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ }
+}
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
+{
+ unsigned long timeout_queues_map = 0;
+ unsigned long now = jiffies;
+ int i;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
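+	/* collect all queues currently in use - these are the timeout candidates */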
+ spin_lock_bh(&mvm->queue_info_lock);
+ for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+ if (mvm->queue_info[i].hw_queue_refcount > 0)
+ timeout_queues_map |= BIT(i);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+	/*
+	 * If a queue times out - mark it as INACTIVE (don't remove it right
+	 * away if we don't have to). This is an optimization in case traffic
+	 * comes later, and we don't HAVE to use a currently-inactive queue.
+	 */
+ for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ continue;
+ }
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+	/*
+	 * If we got here - the queue is in use but hasn't been served
+	 * recently
+	 */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap);
+ spin_unlock(&mvm->queue_info_lock);
+ spin_unlock_bh(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+}
+
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Frame from %pM timed out, tid %d",
+ sta->addr, tid);
+}
+
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+ if (!elapsed)
+ return 0;
+
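+	/* airtime is in usec and elapsed in msec, hence the USEC_PER_MSEC scaling */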
+ return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+ u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+ if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+ return IWL_MVM_TRAFFIC_HIGH;
+ if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+ return IWL_MVM_TRAFFIC_MEDIUM;
+
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+struct iwl_mvm_tcm_iter_data {
+ struct iwl_mvm *mvm;
+ bool any_sent;
+};
+
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_tcm_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+ if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+ if (!mvm->tcm.result.change[mvmvif->id] &&
+ prev == low_latency) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ return;
+ }
+
+ if (prev != low_latency) {
+ /* this sends traffic load and updates quota as well */
+ iwl_mvm_update_low_latency(mvm, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+ } else {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+
+ data->any_sent = true;
+}
+
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tcm_iter_data data = {
+ .mvm = mvm,
+ .any_sent = false,
+ };
+
+ mutex_lock(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iter, &data);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif,
+ uapsd_nonagg_detected_wk.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+ mvm = mvmvif->mvm;
+
+ if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
+ return;
+
+ /* remember that this AP is broken */
+ memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
+ vif->bss_conf.bssid, ETH_ALEN);
+ mvm->uapsd_noagg_bssid_write_idx++;
+ if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
+ mvm->uapsd_noagg_bssid_write_idx = 0;
+
+ iwl_mvm_connection_loss(mvm, vif,
+ "AP isn't using AMPDU with uAPSD enabled");
+}
+
+static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int *mac_id = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mvmvif->id != *mac_id)
+ return;
+
+ if (!vif->bss_conf.assoc)
+ return;
+
+ if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
+ return;
+
+ if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
+ return;
+
+ mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
+ IWL_INFO(mvm,
+ "detected AP should do aggregation but isn't, likely due to U-APSD\n");
+ schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
+}
+
+static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
+ unsigned int elapsed,
+ int mac)
+{
+ u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
+ u64 tpt;
+ unsigned long rate;
+
+ rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
+
+ if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
+ mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
+ return;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
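+		/*
+		 * tpt ends up in kbps and the EWMA rate is in Mbps (scaled
+		 * to kbps below); under 22% of the average rate there isn't
+		 * enough traffic to judge the AP's aggregation behaviour.
+		 */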
+ tpt = 8 * bytes; /* kbps */
+ do_div(tpt, elapsed);
+ rate *= 1000; /* kbps */
+ if (tpt < 22 * rate / 100)
+ return;
+ } else {
+ /*
+ * the rate here is actually the threshold, in 100Kbps units,
+ * so do the needed conversion from bytes to 100Kbps:
+ * 100kb = bits / (100 * 1000),
+ * 100kbps = 100kb / (msecs / 1000) ==
+ * (bits / (100 * 1000)) / (msecs / 1000) ==
+ * bits / (100 * msecs)
+ */
+ tpt = (8 * bytes);
+ do_div(tpt, elapsed * 100);
+ if (tpt < rate)
+ return;
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_uapsd_agg_disconnect_iter, &mac);
+}
+
+static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 *band = _data;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
+}
+
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+ unsigned long ts,
+ bool handle_uapsd)
+{
+ unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+ unsigned int uapsd_elapsed =
+ jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
+ u32 total_airtime = 0;
+ u32 band_airtime[NUM_NL80211_BANDS] = {0};
+ u32 band[NUM_MAC_INDEX_DRIVER] = {0};
+ int ac, mac, i;
+ bool low_latency = false;
+ enum iwl_mvm_traffic_load load, band_load;
+ bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+ if (handle_ll)
+ mvm->tcm.ll_ts = ts;
+ if (handle_uapsd)
+ mvm->tcm.uapsd_nonagg_ts = ts;
+
+ mvm->tcm.result.elapsed = elapsed;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iterator,
+ &band);
+
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ u32 vo_vi_pkts = 0;
+ u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+ total_airtime += airtime;
+ band_airtime[band[mac]] += airtime;
+
+ load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+ mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+ mvm->tcm.result.load[mac] = load;
+ mvm->tcm.result.airtime[mac] = airtime;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+ vo_vi_pkts += mdata->rx.pkts[ac] +
+ mdata->tx.pkts[ac];
+
+ /* enable immediately with enough packets but defer disabling */
+ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+ mvm->tcm.result.low_latency[mac] = true;
+ else if (handle_ll)
+ mvm->tcm.result.low_latency[mac] = false;
+
+ if (handle_ll) {
+ /* clear old data */
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ }
+ low_latency |= mvm->tcm.result.low_latency[mac];
+
+ if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
+ iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
+ mac);
+ /* clear old data */
+ if (handle_uapsd)
+ mdata->uapsd_nonagg_detect.rx_bytes = 0;
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+ }
+
+ load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+ mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
+ mvm->tcm.result.global_load = load;
+
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
+ mvm->tcm.result.band_load[i] = band_load;
+ }
+
+	/*
+	 * If the current load isn't low we need to force re-evaluation
+	 * after the TCM period, so that we can return to low load if there
+	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+	 * triggered by traffic).
+	 */
+ if (load != IWL_MVM_TRAFFIC_LOW)
+ return MVM_TCM_PERIOD;
+ /*
+ * If low-latency is active we need to force re-evaluation after
+ * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+ * when there's no traffic at all.
+ */
+ if (low_latency)
+ return MVM_LL_PERIOD;
+ /*
+ * Otherwise, we don't need to run the work struct because we're
+ * in the default "idle" state - traffic indication is low (which
+ * also covers the "no traffic" case) and low-latency is disabled
+ * so there's no state that may need to be disabled when there's
+ * no traffic at all.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever one of the
+ * two timeouts expire (if there's traffic at all.)
+ */
+ return 0;
+}
+
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+ unsigned long ts = jiffies;
+ bool handle_uapsd =
+ time_after(ts, mvm->tcm.uapsd_nonagg_ts +
+ msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
+
+ spin_lock(&mvm->tcm.lock);
+ if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ spin_unlock(&mvm->tcm.lock);
+ return;
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_request_statistics(mvm, true))
+ handle_uapsd = false;
+ mutex_unlock(&mvm->mutex);
+ }
+
+ spin_lock(&mvm->tcm.lock);
+ /* re-check if somebody else won the recheck race */
+ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ /* calculate statistics */
+ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
+ handle_uapsd);
+
+ /* the memset needs to be visible before the timestamp */
+ smp_mb();
+ mvm->tcm.ts = ts;
+ if (work_delay)
+ schedule_delayed_work(&mvm->tcm.work, work_delay);
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ iwl_mvm_tcm_results(mvm);
+}
+
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ tcm.work);
+
+ iwl_mvm_recalc_tcm(mvm);
+}
+
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.paused = true;
+ spin_unlock_bh(&mvm->tcm.lock);
+ if (with_cancel)
+ cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+ int mac;
+ bool low_latency = false;
+
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+
+ if (mvm->tcm.result.low_latency[mac])
+ low_latency = true;
+ }
+ /* The TCM data needs to be reset before "paused" flag changes */
+ smp_mb();
+ mvm->tcm.paused = false;
+
+ /*
+ * if the current load is not low or low latency is active, force
+ * re-evaluation to cover the case of no traffic.
+ */
+ if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
+ schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
+ else if (low_latency)
+ schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
+
+ spin_unlock_bh(&mvm->tcm.lock);
+}
+
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
+ iwl_mvm_tcm_uapsd_nonagg_detected_wk);
+}
+
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
+}
+
+
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+{
+ bool ps_disabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Disable power save when reading GP2 */
+ ps_disabled = mvm->ps_disabled;
+ if (!ps_disabled) {
+ mvm->ps_disabled = true;
+ iwl_mvm_power_update_device(mvm);
+ }
+
+ *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *boottime = ktime_get_boot_ns();
+
+ if (!ps_disabled) {
+ mvm->ps_disabled = ps_disabled;
+ iwl_mvm_power_update_device(mvm);
+ }
+}