Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/fw')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 1185
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 349
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 180
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/binding.h | 134
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h | 156
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 188
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 613
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/config.h | 117
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/context.h | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 863
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 406
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h | 529
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 424
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/filter.h | 39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/led.h | 17
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/location.h | 1694
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 185
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac.h | 768
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 575
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/offload.h | 85
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/paging.h | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h | 161
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/phy.h | 226
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 670
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h | 68
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 761
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 849
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 1249
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/sf.h | 82
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 494
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/stats.h | 961
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/system.h | 43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h | 161
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 418
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 966
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 117
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 3152
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 330
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 411
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 17
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dump.c | 458
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 560
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 955
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.c | 93
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.h | 274
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/init.c | 159
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c | 135
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h | 96
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/paging.c | 296
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 356
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.h | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/rs.c | 255
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 206
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/smem.c | 114
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 324
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 65
56 files changed, 24094 insertions, 0 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
new file mode 100644
index 000000000..585e8cd2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2019-2022 Intel Corporation
+ */
+#include <linux/uuid.h>
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "acpi.h"
+#include "fw/runtime.h"
+
+const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
+IWL_EXPORT_SYMBOL(iwl_guid);
+
+const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+ 0x81, 0x4F, 0x75, 0xE4,
+ 0xDD, 0x26, 0xB5, 0xFD);
+IWL_EXPORT_SYMBOL(iwl_rfi_guid);
+
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ {}
+};
+
+static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
+ acpi_handle *ret_handle)
+{
+ acpi_handle root_handle;
+ acpi_status status;
+
+ root_handle = ACPI_HANDLE(dev);
+ if (!root_handle) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: Could not retrieve root port handle\n");
+ return -ENOENT;
+ }
+
+ status = acpi_get_handle(root_handle, method, ret_handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: %s method not found\n", method);
+ return -ENOENT;
+ }
+ return 0;
+}
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle handle;
+ acpi_status status;
+ int ret;
+
+ ret = iwl_acpi_get_handle(dev, method, &handle);
+ if (ret)
+ return ERR_PTR(-ENOENT);
+
+ /* Call the method with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: %s method invocation failed (status: 0x%x)\n",
+ method, status);
+ return ERR_PTR(-ENOENT);
+ }
+ return buf.pointer;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+
+/*
+ * Generic function for evaluating a method defined in the device specific
+ * method (DSM) interface. The returned acpi object must be freed by the
+ * calling function.
+ */
+static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args,
+ const guid_t *guid)
+{
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func,
+ args);
+ if (!obj) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
+ rev, func);
+ return ERR_PTR(-ENOENT);
+ }
+ return obj;
+}
+
+/*
+ * Generic function to evaluate a DSM with no arguments and an integer
+ * return value (either as an integer object or inside a buffer object),
+ * verify it and assign it to the "value" parameter.
+ * Returns 0 on success and the appropriate errno otherwise.
+ */
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ const guid_t *guid, u64 *value,
+ size_t expected_size)
+{
+ union acpi_object *obj;
+ int ret = 0;
+
+ obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
+ if (IS_ERR(obj)) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Failed to get DSM object. func= %d\n",
+ func);
+ return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *value = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ __le64 le_value = 0;
+
+ if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+ return -EINVAL;
+
+ /* if the buffer size doesn't match the expected size */
+ if (obj->buffer.length != expected_size)
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+ obj->buffer.length);
+
+ /* assuming LE from Intel BIOS spec */
+ memcpy(&le_value, obj->buffer.pointer,
+ min_t(size_t, expected_size, (size_t)obj->buffer.length));
+ *value = le64_to_cpu(le_value);
+ } else {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method did not return a valid object, type=%d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ IWL_DEBUG_DEV_RADIO(dev,
+ "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+ func, ret);
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+ guid, &val, sizeof(u8));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) to be u8 */
+ *value = (u8)val;
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+
+/*
+ * Evaluate a DSM with no arguments and a u32 return value.
+ */
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value)
+{
+ int ret;
+ u64 val;
+
+ ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+ guid, &val, sizeof(u32));
+
+ if (ret < 0)
+ return ret;
+
+ /* cast val (u64) to be u32 */
+ *value = (u32)val;
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
+
+union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size,
+ int max_data_size,
+ int *tbl_rev)
+{
+ int i;
+ union acpi_object *wifi_pkg;
+
+ /*
+ * We need at least one entry in the wifi package that
+ * describes the domain, and one more entry, otherwise there's
+ * no point in reading it.
+ */
+ if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We need at least two packages, one for the revision and one
+ * for the data itself. Also check that the revision is valid
+ * (i.e. it is an integer (each caller has to check by itself
+ * if the returned revision is supported)).
+ */
+ if (data->type != ACPI_TYPE_PACKAGE ||
+ data->package.count < 2 ||
+ data->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ *tbl_rev = data->package.elements[0].integer.value;
+
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < data->package.count; i++) {
+ union acpi_object *domain;
+
+ wifi_pkg = &data->package.elements[i];
+
+ /* skip entries that are not a package with the right size */
+ if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+ wifi_pkg->package.count < min_data_size ||
+ wifi_pkg->package.count > max_data_size)
+ continue;
+
+ domain = &wifi_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == ACPI_WIFI_DOMAIN)
+ goto found;
+ }
+
+ return ERR_PTR(-ENOENT);
+
+found:
+ return wifi_pkg;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
+
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ union iwl_tas_config_cmd *cmd, int fw_ver)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret, tbl_rev, i, block_list_size, enabled;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* try to read the WTAS table revision 1 or revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WTAS_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
+ ACPI_TYPE_INTEGER) {
+ u32 tas_selection =
+ (u32)wifi_pkg->package.elements[1].integer.value;
+ u16 override_iec =
+ (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
+ u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
+ ACPI_WTAS_ENABLE_IEC_POS;
+ u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
+
+
+ enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
+ if (fw_ver <= 3) {
+ cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+ cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+ } else {
+ cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+ cmd->v4.override_tas_iec = (u8)override_iec;
+ cmd->v4.enable_tas_iec = (u8)enabled_iec;
+ }
+
+ } else if (tbl_rev == 0 &&
+ wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ enabled = !!wifi_pkg->package.elements[1].integer.value;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out_free;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
+ if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[2].integer.value >
+ APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
+ wifi_pkg->package.elements[2].integer.value);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ block_list_size = wifi_pkg->package.elements[2].integer.value;
+ cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
+ if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
+ block_list_size);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < block_list_size; i++) {
+ u32 country;
+
+ if (wifi_pkg->package.elements[3 + i].type !=
+ ACPI_TYPE_INTEGER) {
+ IWL_DEBUG_RADIO(fwrt,
+ "TAS invalid array elem %d\n", 3 + i);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ country = wifi_pkg->package.elements[3 + i].integer.value;
+ cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
+ }
+
+ ret = 1;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
+
+int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+ union acpi_object *wifi_pkg, *data;
+ u32 mcc_val;
+ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ mcc_val = wifi_pkg->package.elements[1].integer.value;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+ union acpi_object *data, *wifi_pkg;
+ u64 dflt_pwr_limit;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data)) {
+ dflt_pwr_limit = 0;
+ goto out;
+ }
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
+ dflt_pwr_limit = 0;
+ goto out_free;
+ }
+
+ dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+out_free:
+ kfree(data);
+out:
+ return dflt_pwr_limit;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
+
+int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ *extl_clk = wifi_pkg->package.elements[1].integer.value;
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
+
+static int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains, u8 num_sub_bands)
+{
+ int i, j, idx = 0;
+
+ /*
+ * The table from ACPI is flat, but we store it in a
+ * structured array.
+ */
+ for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ /* if we don't have the values, use the default */
+ if (i >= num_chains || j >= num_sub_bands) {
+ profile->chains[i].subbands[j] = 0;
+ } else {
+ if (table[idx].type != ACPI_TYPE_INTEGER ||
+ table[idx].integer.value > U8_MAX)
+ return -EINVAL;
+
+ profile->chains[i].subbands[j] =
+ table[idx].integer.value;
+
+ idx++;
+ }
+ }
+ }
+
+ /* Only if all values were valid can the profile be enabled */
+ profile->enabled = enabled;
+
+ return 0;
+}
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *table, *data;
+ int ret, tbl_rev;
+ u32 flags;
+ u8 num_chains, num_sub_bands;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* start by trying to read revision 2 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE_REV2,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 2) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV2;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
+
+ goto read_table;
+ }
+
+ /* then try revision 1 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE_REV1,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV1;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
+
+ goto read_table;
+ }
+
+ /* then finally revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE_REV0,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV0;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
+
+ goto read_table;
+ }
+
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
+read_table:
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
+
+ flags = wifi_pkg->package.elements[1].integer.value;
+ fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+ int i, n_profiles, tbl_rev, pos;
+ int ret = 0;
+ u8 num_chains, num_sub_bands;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* start by trying to read revision 2 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE_REV2,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 2) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV2;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
+
+ goto read_table;
+ }
+
+ /* then try revision 1 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE_REV1,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV1;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
+
+ goto read_table;
+ }
+
+ /* then finally revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE_REV0,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ num_chains = ACPI_SAR_NUM_CHAINS_REV0;
+ num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
+
+ goto read_table;
+ }
+
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
+read_table:
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILES_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* the tables start at element 3 */
+ pos = 3;
+
+ for (i = 0; i < n_profiles; i++) {
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
+ &fwrt->sar_profiles[i + 1], enabled,
+ num_chains, num_sub_bands);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += num_chains * num_sub_bands;
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int i, j, k, ret, tbl_rev;
+ u8 num_bands, num_profiles;
+ static const struct {
+ u8 revisions;
+ u8 bands;
+ u8 profiles;
+ u8 min_profiles;
+ } rev_data[] = {
+ {
+ .revisions = BIT(3),
+ .bands = ACPI_GEO_NUM_BANDS_REV2,
+ .profiles = ACPI_NUM_GEO_PROFILES_REV3,
+ .min_profiles = 3,
+ },
+ {
+ .revisions = BIT(2),
+ .bands = ACPI_GEO_NUM_BANDS_REV2,
+ .profiles = ACPI_NUM_GEO_PROFILES,
+ },
+ {
+ .revisions = BIT(0) | BIT(1),
+ .bands = ACPI_GEO_NUM_BANDS_REV0,
+ .profiles = ACPI_NUM_GEO_PROFILES,
+ },
+ };
+ int idx;
+ /* start from one to skip the domain */
+ int entry_idx = 1;
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3);
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES);
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* read the highest revision we understand first */
+ for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) {
+ /* min_profiles != 0 requires num_profiles header */
+ u32 hdr_size = 1 + !!rev_data[idx].min_profiles;
+ u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE *
+ rev_data[idx].bands;
+ u32 max_size = hdr_size + profile_size * rev_data[idx].profiles;
+ u32 min_size;
+
+ if (!rev_data[idx].min_profiles)
+ min_size = max_size;
+ else
+ min_size = hdr_size +
+ profile_size * rev_data[idx].min_profiles;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data,
+ min_size, max_size,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (!(BIT(tbl_rev) & rev_data[idx].revisions))
+ continue;
+
+ num_bands = rev_data[idx].bands;
+ num_profiles = rev_data[idx].profiles;
+
+ if (rev_data[idx].min_profiles) {
+ /* read header that says # of profiles */
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_profiles = entry->integer.value;
+
+ /*
+ * this also validates >= min_profiles since we
+ * otherwise wouldn't have gotten the data when
+ * looking up in ACPI
+ */
+ if (wifi_pkg->package.count !=
+ hdr_size + profile_size * num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ goto read_table;
+ }
+ }
+
+ if (idx < ARRAY_SIZE(rev_data))
+ ret = PTR_ERR(wifi_pkg);
+ else
+ ret = -ENOENT;
+ goto out_free;
+
+read_table:
+ fwrt->geo_rev = tbl_rev;
+ for (i = 0; i < num_profiles; i++) {
+ for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ union acpi_object *entry;
+
+ /*
+ * num_bands is either 2 or 3, if it's only 2 then
+ * fill the third band (6 GHz) with the values from
+ * 5 GHz (second band)
+ */
+ if (j >= num_bands) {
+ fwrt->geo_profiles[i].bands[j].max =
+ fwrt->geo_profiles[i].bands[1].max;
+ } else {
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > U8_MAX) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->geo_profiles[i].bands[j].max =
+ entry->integer.value;
+ }
+
+ for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ /* same here as above */
+ if (j >= num_bands) {
+ fwrt->geo_profiles[i].bands[j].chains[k] =
+ fwrt->geo_profiles[i].bands[1].chains[k];
+ } else {
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > U8_MAX) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->geo_profiles[i].bands[j].chains[k] =
+ entry->integer.value;
+ }
+ }
+ }
+ }
+
+ fwrt->geo_num_profiles = num_profiles;
+ fwrt->geo_enabled = true;
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
+
+__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u8 value;
+ __le32 config_bitmap = 0;
+
+ /*
+ ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
+ */
+ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+ DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &iwl_guid, &value);
+
+ if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+
+ /*
+ ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
+ */
+ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+ DSM_FUNC_DISABLE_SRD,
+ &iwl_guid, &value);
+ if (!ret) {
+ if (value == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (value == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data, *flags;
+ int i, j, ret, tbl_rev, num_sub_bands = 0;
+ int idx = 2;
+
+ fwrt->ppag_flags = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev == 1 || tbl_rev == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ IWL_DEBUG_RADIO(fwrt,
+ "Reading PPAG table v2 (tbl_rev=%d)\n",
+ tbl_rev);
+ goto read_table;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
+ }
+
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
+read_table:
+ fwrt->ppag_ver = tbl_rev;
+ flags = &wifi_pkg->package.elements[1];
+
+ if (flags->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
+
+ if (!fwrt->ppag_flags) {
+ ret = 0;
+ goto out_free;
+ }
+
+ /*
+ * Read and verify the gain values and save them into the PPAG table.
+ * The first sub-band (j=0) corresponds to Low-Band (2.4 GHz), and the
+ * following sub-bands to High-Band (5 GHz).
+ */
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ union acpi_object *ent;
+
+ ent = &wifi_pkg->package.elements[idx++];
+ if (ent->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
+
+ if ((j == 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
+ (j != 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+ fwrt->ppag_flags = 0;
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ }
+
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+ if (!fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+ fwrt->ppag_ver);
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else if (cmd_ver == 2 || cmd_ver == 3) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
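As a usage illustration only (not part of this patch), a caller that already holds a valid struct device could query one of the wifi DSM functions through the helpers exported above, much as iwl_acpi_get_lari_config_bitmap() does. The function index and comparison values come from acpi.h; the wrapper function itself is hypothetical:

#include "fw/acpi.h"

/* illustrative: ask the platform BIOS whether SRD channels are restricted */
static void example_check_srd(struct device *dev)
{
	u8 value;

	if (!iwl_acpi_get_dsm_u8(dev, 0, DSM_FUNC_DISABLE_SRD,
				 &iwl_guid, &value) &&
	    value == DSM_VALUE_SRD_PASSIVE)
		dev_info(dev, "SRD channels restricted to passive use\n");
}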
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
new file mode 100644
index 000000000..6f361c591
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2022 Intel Corporation
+ */
+#ifndef __iwl_fw_acpi__
+#define __iwl_fw_acpi__
+
+#include <linux/acpi.h>
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
+
+#define ACPI_WRDS_METHOD "WRDS"
+#define ACPI_EWRD_METHOD "EWRD"
+#define ACPI_WGDS_METHOD "WGDS"
+#define ACPI_WRDD_METHOD "WRDD"
+#define ACPI_SPLC_METHOD "SPLC"
+#define ACPI_ECKV_METHOD "ECKV"
+#define ACPI_PPAG_METHOD "PPAG"
+#define ACPI_WTAS_METHOD "WTAS"
+
+#define ACPI_WIFI_DOMAIN (0x07)
+
+#define ACPI_SAR_PROFILE_NUM 4
+
+#define ACPI_NUM_GEO_PROFILES 3
+#define ACPI_NUM_GEO_PROFILES_REV3 8
+#define ACPI_GEO_PER_CHAIN_SIZE 3
+
+#define ACPI_SAR_NUM_CHAINS_REV0 2
+#define ACPI_SAR_NUM_CHAINS_REV1 2
+#define ACPI_SAR_NUM_CHAINS_REV2 4
+#define ACPI_SAR_NUM_SUB_BANDS_REV0 5
+#define ACPI_SAR_NUM_SUB_BANDS_REV1 11
+#define ACPI_SAR_NUM_SUB_BANDS_REV2 11
+
+#define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV0 + 2)
+#define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV1 + 2)
+#define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV2 + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \
+ ACPI_SAR_NUM_CHAINS_REV0 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV0 + 3)
+#define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \
+ ACPI_SAR_NUM_CHAINS_REV1 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV1 + 3)
+#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
+ ACPI_SAR_NUM_CHAINS_REV2 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
+
+/* revision 0 and 1 are identical, except for the semantics in the FW */
+#define ACPI_GEO_NUM_BANDS_REV0 2
+#define ACPI_GEO_NUM_BANDS_REV2 3
+#define ACPI_GEO_NUM_CHAINS 2
+
+#define ACPI_WRDD_WIFI_DATA_SIZE 2
+#define ACPI_SPLC_WIFI_DATA_SIZE 2
+#define ACPI_ECKV_WIFI_DATA_SIZE 2
+
+/*
+ * TAS size: 1 element for type,
+ * 1 element for enabled field,
+ * 1 element for block list size,
+ * 16 elements for block list array
+ */
+#define APCI_WTAS_BLACK_LIST_MAX 16
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
+#define ACPI_WTAS_ENABLED_MSK 0x1
+#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
+#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
+#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
+#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+#define ACPI_WTAS_USA_UHB_MSK BIT(16)
+#define ACPI_WTAS_USA_UHB_POS 16
+
+
+#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS_V1) + 2)
+#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS_V2) + 2)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define ACPI_PPAG_MIN_LB -16
+#define ACPI_PPAG_MAX_LB 24
+#define ACPI_PPAG_MIN_HB -16
+#define ACPI_PPAG_MAX_HB 40
+#define ACPI_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+#define IWL_REDUCE_POWER_FLAGS_POS 1
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
+};
+
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[ACPI_GEO_NUM_CHAINS];
+};
+
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
+};
+
+enum iwl_dsm_funcs_rev_0 {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+/* DSM RFI uses a different GUID, so need separate definitions */
+
+#define DSM_RFI_FUNC_ENABLE 3
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_ENABLE,
+ DSM_VALUE_RFI_DISABLE,
+ DSM_VALUE_RFI_MAX
+};
+
+#ifdef CONFIG_ACPI
+
+struct iwl_fw_runtime;
+
+extern const guid_t iwl_guid;
+extern const guid_t iwl_rfi_guid;
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value);
+
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value);
+
+union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size,
+ int max_data_size,
+ int *tbl_rev);
+/**
+ * iwl_acpi_get_mcc - read MCC from ACPI, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev);
+
+/*
+ * iwl_acpi_get_eckv - read external clock validation from ACPI, if available
+ *
+ * @dev: the struct device
+ * @extl_clk: output var (2 bytes) that will get the clk indication.
+ *
+ * This function tries to read the external clock indication
+ * from ACPI if available.
+ */
+int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ union iwl_tas_config_cmd *cmd, int fw_ver);
+
+__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+#else /* CONFIG_ACPI */
+
+static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
+ int func, union acpi_object *args)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+ const guid_t *guid, u8 *value)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+ const guid_t *guid, u32 *value)
+{
+ return -ENOENT;
+}
+
+static inline union acpi_object *
+iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size, int max_data_size,
+ int *tbl_rev)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+ return 0;
+}
+
+static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return 1;
+}
+
+static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
+static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+ union iwl_tas_config_cmd *cmd, int fw_ver)
+{
+ return -ENOENT;
+}
+
+static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ return 0;
+}
+
+static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
+#endif /* CONFIG_ACPI */
+
+static inline union acpi_object *
+iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+ int data_size, int *tbl_rev)
+{
+ return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size,
+ tbl_rev);
+}
+
+#endif /* __iwl_fw_acpi__ */
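To make the size macros above concrete (illustrative arithmetic only, assuming a tree where static_assert from <linux/build_bug.h> is available): the WRDS revision 2 package holds the domain, a flags word and a flat 4-chain by 11-sub-band table, while the EWRD revision 2 package holds the domain, an enabled flag, a profile count and up to three additional profiles.

#include <linux/build_bug.h>
#include "fw/acpi.h"

/* WRDS rev 2: domain + flags + 4 chains * 11 sub-bands = 46 elements */
static_assert(ACPI_WRDS_WIFI_DATA_SIZE_REV2 == 46);

/* EWRD rev 2: domain + enabled + n_profiles + 3 * 4 * 11 = 135 elements */
static_assert(ACPI_EWRD_WIFI_DATA_SIZE_REV2 == 135);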
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
new file mode 100644
index 000000000..e00ab21e7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_alive_h__
+#define __iwl_fw_api_alive_h__
+
+/* alive response is_valid values */
+#define ALIVE_RESP_UCODE_OK BIT(0)
+#define ALIVE_RESP_RFKILL BIT(1)
+
+/* alive response ver_type values */
+enum {
+ FW_TYPE_HW = 0,
+ FW_TYPE_PROT = 1,
+ FW_TYPE_AP = 2,
+ FW_TYPE_WOWLAN = 3,
+ FW_TYPE_TIMING = 4,
+ FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+ FW_SUBTYPE_FULL_FEATURE = 0,
+ FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+ FW_SUBTYPE_REDUCED = 2,
+ FW_SUBTYPE_ALIVE_ONLY = 3,
+ FW_SUBTYPE_WOWLAN = 4,
+ FW_SUBTYPE_AP_SUBTYPE = 5,
+ FW_SUBTYPE_WIPAN = 6,
+ FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWL_ALIVE_STATUS_ERR 0xDEAD
+#define IWL_ALIVE_STATUS_OK 0xCAFE
+
+#define IWL_ALIVE_FLG_RFKILL BIT(0)
+
+struct iwl_lmac_debug_addrs {
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */
+
+struct iwl_lmac_alive {
+ __le32 ucode_major;
+ __le32 ucode_minor;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le32 timestamp;
+ struct iwl_lmac_debug_addrs dbg_ptrs;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_umac_debug_addrs {
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */
+
+struct iwl_umac_alive {
+ __le32 umac_major; /* UMAC version: major */
+ __le32 umac_minor; /* UMAC version: minor */
+ struct iwl_umac_debug_addrs dbg_ptrs;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct iwl_sku_id {
+ __le32 data[3];
+} __packed; /* SKU_ID_API_S_VER_1 */
+
+struct iwl_alive_ntf_v3 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data;
+ struct iwl_umac_alive umac_data;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_alive_ntf_v4 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_4 */
+
+struct iwl_alive_ntf_v5 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+ struct iwl_sku_id sku_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
+
+struct iwl_imr_alive_info {
+ __le64 base_addr;
+ __le32 size;
+ __le32 enabled;
+} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
+
+struct iwl_alive_ntf_v6 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+ struct iwl_sku_id sku_id;
+ struct iwl_imr_alive_info imr;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+
+/**
+ * enum iwl_extended_cfg_flags - commands driver may send before
+ * finishing init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+ IWL_INIT_DEBUG_CFG,
+ IWL_INIT_NVM,
+ IWL_INIT_PHY,
+};
+
+/**
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
+ * before finishing init flows
+ * @init_flags: values from iwl_extended_cfg_flags
+ */
+struct iwl_init_extended_cfg_cmd {
+ __le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor: radio flavor
+ * @radio_step: radio version step
+ * @radio_dash: radio version dash
+ */
+struct iwl_radio_version_notif {
+ __le32 radio_flavor;
+ __le32 radio_step;
+ __le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+enum iwl_card_state_flags {
+ CARD_ENABLED = 0x00,
+ HW_CARD_DISABLED = 0x01,
+ SW_CARD_DISABLED = 0x02,
+ CT_KILL_CARD_DISABLED = 0x04,
+ HALT_CARD_DISABLED = 0x08,
+ CARD_DISABLED_MSK = 0x0f,
+ CARD_IS_RX_ON = 0x10,
+};
+
+/**
+ * enum iwl_error_recovery_flags - flags for error recovery cmd
+ * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent
+ * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery
+ */
+enum iwl_error_recovery_flags {
+ ERROR_RECOVERY_UPDATE_DB = BIT(0),
+ ERROR_RECOVERY_END_OF_RECOVERY = BIT(1),
+};
+
+/**
+ * struct iwl_fw_error_recovery_cmd - recovery cmd sent upon assert
+ * @flags: &enum iwl_error_recovery_flags
+ * @buf_size: db buffer size in bytes
+ */
+struct iwl_fw_error_recovery_cmd {
+ __le32 flags;
+ __le32 buf_size;
+} __packed; /* ERROR_RECOVERY_CMD_HDR_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_alive_h__ */
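As a sketch of how the versioned alive notifications above are meant to be consumed (a hypothetical handler, not the driver's actual RX path): the status word is checked against IWL_ALIVE_STATUS_OK and the LMAC/UMAC debug pointers are stashed for later error dumps.

/* illustrative only: minimal handling of a v5 alive notification */
static bool example_handle_alive_v5(const struct iwl_alive_ntf_v5 *ntf,
				    u32 *lmac_err_table, u32 *umac_err_info)
{
	if (le16_to_cpu(ntf->status) != IWL_ALIVE_STATUS_OK)
		return false;

	/* SRAM addresses consulted later when dumping firmware errors */
	*lmac_err_table =
		le32_to_cpu(ntf->lmac_data[0].dbg_ptrs.error_event_table_ptr);
	*umac_err_info =
		le32_to_cpu(ntf->umac_data.dbg_ptrs.error_info_addr);
	return true;
}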
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
new file mode 100644
index 000000000..29e2816e7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_binding_h__
+#define __iwl_fw_api_binding_h__
+
+#include <fw/file.h>
+#include <fw/img.h>
+
+#define MAX_MACS_IN_BINDING (3)
+#define MAX_BINDINGS (4)
+
+/**
+ * struct iwl_binding_cmd_v1 - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding,
+ * &enum iwl_ctxt_id_and_color
+ * @phy: PHY id and color which belongs to the binding,
+ * &enum iwl_ctxt_id_and_color
+ */
+struct iwl_binding_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* BINDING_DATA_API_S_VER_1 */
+ __le32 macs[MAX_MACS_IN_BINDING];
+ __le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_binding_cmd - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * &enum iwl_ctxt_id_and_color
+ * @phy: PHY id and color which belongs to the binding
+ * &enum iwl_ctxt_id_and_color
+ * @lmac_id: the lmac id the binding belongs to
+ */
+struct iwl_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* BINDING_DATA_API_S_VER_1 */
+ __le32 macs[MAX_MACS_IN_BINDING];
+ __le32 phy;
+ __le32 lmac_id;
+} __packed; /* BINDING_CMD_API_S_VER_2 */
+
+#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
+#define IWL_LMAC_24G_INDEX 0
+#define IWL_LMAC_5G_INDEX 1
+
+static inline u32 iwl_mvm_get_lmac_id(const struct iwl_fw *fw,
+ enum nl80211_band band){
+ if (!fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWL_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwl_time_quota_data_v1 - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding,
+ * &enum iwl_ctxt_id_and_color
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwl_time_quota_data_v1 {
+ __le32 id_and_color;
+ __le32 quota;
+ __le32 max_duration;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ * Note: on non-CDB the fourth one is the auxiliary mac and is
+ * essentially zero.
+ * On CDB the fourth one is a regular binding.
+ */
+struct iwl_time_quota_cmd_v1 {
+ struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+enum iwl_quota_low_latency {
+ IWL_QUOTA_LOW_LATENCY_NONE = 0,
+ IWL_QUOTA_LOW_LATENCY_TX = BIT(0),
+ IWL_QUOTA_LOW_LATENCY_RX = BIT(1),
+ IWL_QUOTA_LOW_LATENCY_TX_RX =
+ IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX,
+};
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding.
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ * @low_latency: low latency status, &enum iwl_quota_low_latency
+ */
+struct iwl_time_quota_data {
+ __le32 id_and_color;
+ __le32 quota;
+ __le32 max_duration;
+ __le32 low_latency;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * Note: on non-CDB the fourth one is the auxiliary mac and is essentially zero.
+ * On CDB the fourth one is a regular binding.
+ *
+ * @quotas: allocations per binding
+ */
+struct iwl_time_quota_cmd {
+ struct iwl_time_quota_data quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_binding_h__ */
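A minimal sketch of how the iwl_mvm_get_lmac_id() helper above feeds the v2 binding command (the wrapper function is hypothetical; only the capability flag, the LMAC index macros and the lmac_id field come from this header):

/* illustrative: pick the value to place in iwl_binding_cmd.lmac_id */
static __le32 example_binding_lmac_id(const struct iwl_fw *fw,
				      enum nl80211_band band)
{
	/*
	 * Without IWL_UCODE_TLV_CAPA_CDB_SUPPORT everything stays on
	 * IWL_LMAC_24G_INDEX; with CDB, the 5/6 GHz bands map to
	 * IWL_LMAC_5G_INDEX.
	 */
	return cpu_to_le32(iwl_mvm_get_lmac_id(fw, band));
}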
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h
new file mode 100644
index 000000000..d130d4f85
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_cmdhdr_h__
+#define __iwl_fw_api_cmdhdr_h__
+
+/**
+ * DOC: Host command section
+ *
+ * A host command is a command issued by the upper layer to the fw. There are
+ * several versions of fw that have several APIs. The transport layer is
+ * completely agnostic to these differences.
+ * The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
+ */
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/*
+ * These functions retrieve specific information from the id field in
+ * the iwl_host_cmd struct, which contains the command id, the group id
+ * and the version of the command, and vice versa (compose such an id
+ * from its parts).
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+ return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+ return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+ return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP 1
+
+/**
+ * struct iwl_cmd_header - (short) command header format
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ /**
+ * @cmd: Command ID: REPLY_RXON, etc.
+ */
+ u8 cmd;
+ /**
+ * @group_id: group ID, for commands with groups
+ */
+ u8 group_id;
+ /**
+ * @sequence:
+ * Sequence number for the command.
+ *
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13:14 reserved
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+} __packed;
+
+/**
+ * struct iwl_cmd_header_wide - wide command header format
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * This is the wide version that contains more information about the command,
+ * such as length, version and command type.
+ *
+ * @cmd: command ID, like in &struct iwl_cmd_header
+ * @group_id: group ID, like in &struct iwl_cmd_header
+ * @sequence: sequence, like in &struct iwl_cmd_header
+ * @length: length of the command
+ * @reserved: reserved
+ * @version: command version
+ */
+struct iwl_cmd_header_wide {
+ u8 cmd;
+ u8 group_id;
+ __le16 sequence;
+ __le16 length;
+ u8 reserved;
+ u8 version;
+} __packed;
+
+/**
+ * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations
+ * @type: type of the result - mostly ignored
+ * @length: length of the data
+ * @data: data, length in @length
+ */
+struct iwl_calib_res_notif_phy_db {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_phy_db_cmd - configure operational ucode
+ * @type: type of the data
+ * @length: length of the data
+ * @data: data, length in @length
+ */
+struct iwl_phy_db_cmd {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwl_cmd_response {
+ __le32 status;
+};
+
+#endif /* __iwl_fw_api_cmdhdr_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
new file mode 100644
index 000000000..3e81e9369
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2013-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_coex_h__
+#define __iwl_fw_api_coex_h__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define BITS(nb) (BIT(nb) - 1)
+
+enum iwl_bt_coex_lut_type {
+ BT_COEX_TIGHT_LUT = 0,
+ BT_COEX_LOOSE_LUT,
+ BT_COEX_TX_DIS_LUT,
+
+ BT_COEX_MAX_LUT,
+ BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
+
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
+
+enum iwl_bt_coex_mode {
+ BT_COEX_DISABLE = 0x0,
+ BT_COEX_NW = 0x1,
+ BT_COEX_BT = 0x2,
+ BT_COEX_WIFI = 0x3,
+}; /* BT_COEX_MODES_E */
+
+enum iwl_bt_coex_enabled_modules {
+ BT_COEX_MPLUT_ENABLED = BIT(0),
+ BT_COEX_MPLUT_BOOST_ENABLED = BIT(1),
+ BT_COEX_SYNC2SCO_ENABLED = BIT(2),
+ BT_COEX_CORUN_ENABLED = BIT(3),
+ BT_COEX_HIGH_BAND_RET = BIT(4),
+}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @mode: &enum iwl_bt_coex_mode
+ * @enabled_modules: &enum iwl_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+ __le32 mode;
+ __le32 enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_bt_coex_reduced_txp_update_cmd - reduced TX power update command
+ * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT enables/disables reduced TX
+ *	power; the remaining bits carry the sta_id (value)
+ */
+struct iwl_bt_coex_reduced_txp_update_cmd {
+ __le32 reduced_txp;
+} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */
+
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci: primary channel inhibition bitmap
+ * @primary_ch_phy_id: primary channel PHY ID
+ * @bt_secondary_ci: secondary channel inhibition bitmap
+ * @secondary_ch_phy_id: secondary channel PHY ID
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+ __le64 bt_primary_ci;
+ __le32 primary_ch_phy_id;
+
+ __le64 bt_secondary_ci;
+ __le32 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_2 */
+
+#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
+ BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
+ BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+
+enum iwl_bt_mxbox_dw0 {
+ BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+ BT_MBOX(0, LE_PROF1, 3, 1),
+ BT_MBOX(0, LE_PROF2, 4, 1),
+ BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+ BT_MBOX(0, CHL_SEQ_N, 8, 4),
+ BT_MBOX(0, INBAND_S, 13, 1),
+ BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+ BT_MBOX(0, LE_SCAN, 20, 1),
+ BT_MBOX(0, LE_ADV, 21, 1),
+ BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+ BT_MBOX(0, OPEN_CON_1, 28, 2),
+};
+
+enum iwl_bt_mxbox_dw1 {
+ BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+ BT_MBOX(1, IP_SR, 4, 1),
+ BT_MBOX(1, LE_MSTR, 5, 1),
+ BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+ BT_MBOX(1, MSG_TYPE, 16, 3),
+ BT_MBOX(1, SSN, 19, 2),
+};
+
+enum iwl_bt_mxbox_dw2 {
+ BT_MBOX(2, SNIFF_ACT, 0, 3),
+ BT_MBOX(2, PAG, 3, 1),
+ BT_MBOX(2, INQUIRY, 4, 1),
+ BT_MBOX(2, CONN, 5, 1),
+ BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+ BT_MBOX(2, DISC, 13, 1),
+ BT_MBOX(2, SCO_TX_ACT, 16, 2),
+ BT_MBOX(2, SCO_RX_ACT, 18, 2),
+ BT_MBOX(2, ESCO_RE_TX, 20, 2),
+ BT_MBOX(2, SCO_DURATION, 24, 6),
+};
+
+enum iwl_bt_mxbox_dw3 {
+ BT_MBOX(3, SCO_STATE, 0, 1),
+ BT_MBOX(3, SNIFF_STATE, 1, 1),
+ BT_MBOX(3, A2DP_STATE, 2, 1),
+ BT_MBOX(3, ACL_STATE, 3, 1),
+ BT_MBOX(3, MSTR_STATE, 4, 1),
+ BT_MBOX(3, OBX_STATE, 5, 1),
+ BT_MBOX(3, A2DP_SRC, 6, 1),
+ BT_MBOX(3, OPEN_CON_2, 8, 2),
+ BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+ BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+ BT_MBOX(3, INBAND_P, 13, 1),
+ BT_MBOX(3, MSG_TYPE_2, 16, 3),
+ BT_MBOX(3, SSN_2, 19, 2),
+ BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+};
+
+#define BT_MBOX_MSG(_notif, _num, _field) \
+ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
+ >> BT_MBOX##_num##_##_field##_POS)
+
+#define BT_MBOX_PRINT(_num, _field, _end) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t%s: %d%s", \
+ #_field, \
+ BT_MBOX_MSG(notif, _num, _field), \
+ true ? "\n" : ", ")
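+
+/*
+ * Illustrative sketch (not part of the firmware API): BT_MBOX() above
+ * defines a _POS and a mask enumerator per field, and BT_MBOX_MSG()
+ * combines them to extract a field from the mailbox words of a
+ * &struct iwl_bt_coex_profile_notif (defined below), e.g.:
+ *
+ *	u32 traffic_load = BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);
+ *	u32 open_con = BT_MBOX_MSG(notif, 0, OPEN_CON_1);
+ */
+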
+enum iwl_bt_activity_grading {
+ BT_OFF = 0,
+ BT_ON_NO_CONNECTION = 1,
+ BT_LOW_TRAFFIC = 2,
+ BT_HIGH_TRAFFIC = 3,
+ BT_VERY_HIGH_TRAFFIC = 4,
+
+ BT_MAX_AG,
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
+
+enum iwl_bt_ci_compliance {
+ BT_CI_COMPLIANCE_NONE = 0,
+ BT_CI_COMPLIANCE_PRIMARY = 1,
+ BT_CI_COMPLIANCE_SECONDARY = 2,
+ BT_CI_COMPLIANCE_BOTH = 3,
+}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @msg_idx: the index of the message
+ * @bt_ci_compliance: &enum iwl_bt_ci_compliance
+ * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
+ * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
+ * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
+ * @ttc_status: is TTC enabled - one bit per PHY
+ * @rrc_status: is RRC enabled - one bit per PHY
+ * @reserved: reserved
+ */
+struct iwl_bt_coex_profile_notif {
+ __le32 mbox_msg[4];
+ __le32 msg_idx;
+ __le32 bt_ci_compliance;
+
+ __le32 primary_ch_lut;
+ __le32 secondary_ch_lut;
+ __le32 bt_activity_grading;
+ u8 ttc_status;
+ u8 rrc_status;
+ __le16 reserved;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+
+#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
new file mode 100644
index 000000000..0b052c2e5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -0,0 +1,613 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2022 Intel Corporation
+ */
+#ifndef __iwl_fw_api_commands_h__
+#define __iwl_fw_api_commands_h__
+
+/**
+ * enum iwl_mvm_command_groups - command groups for the firmware
+ * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds
+ * @LONG_GROUP: legacy group with long header, also uses command IDs
+ * from &enum iwl_legacy_cmds
+ * @SYSTEM_GROUP: system group, uses command IDs from
+ * &enum iwl_system_subcmd_ids
+ * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from
+ * &enum iwl_mac_conf_subcmd_ids
+ * @PHY_OPS_GROUP: PHY operations group, uses command IDs from
+ * &enum iwl_phy_ops_subcmd_ids
+ * @DATA_PATH_GROUP: data path group, uses command IDs from
+ * &enum iwl_data_path_subcmd_ids
+ * @SCAN_GROUP: scan group, uses command IDs from
+ * &enum iwl_scan_subcmd_ids
+ * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
+ * @LOCATION_GROUP: location group, uses command IDs from
+ * &enum iwl_location_subcmd_ids
+ * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
+ * &enum iwl_prot_offload_subcmd_ids
+ * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
+ * &enum iwl_regulatory_and_nvm_subcmd_ids
+ * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
+ */
+enum iwl_mvm_command_groups {
+ LEGACY_GROUP = 0x0,
+ LONG_GROUP = 0x1,
+ SYSTEM_GROUP = 0x2,
+ MAC_CONF_GROUP = 0x3,
+ PHY_OPS_GROUP = 0x4,
+ DATA_PATH_GROUP = 0x5,
+ SCAN_GROUP = 0x6,
+ NAN_GROUP = 0x7,
+ LOCATION_GROUP = 0x8,
+ PROT_OFFLOAD_GROUP = 0xb,
+ REGULATORY_AND_NVM_GROUP = 0xc,
+ DEBUG_GROUP = 0xf,
+};
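+
+/*
+ * Illustrative sketch (not part of the firmware API): commands in a
+ * non-legacy group are addressed by combining the group with the
+ * subcommand id, e.g. using WIDE_ID() from api/cmdhdr.h:
+ *
+ *	u32 cmd = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
+ */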
+
+/**
+ * enum iwl_legacy_cmds - legacy group command IDs
+ */
+enum iwl_legacy_cmds {
+ /**
+ * @UCODE_ALIVE_NTFY:
+ * Alive data from the firmware, as described in
+ * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
+ * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6.
+ */
+ UCODE_ALIVE_NTFY = 0x1,
+
+ /**
+ * @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
+ */
+ REPLY_ERROR = 0x2,
+
+ /**
+ * @ECHO_CMD: Send data to the device to have it returned immediately.
+ */
+ ECHO_CMD = 0x3,
+
+ /**
+ * @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
+ */
+ INIT_COMPLETE_NOTIF = 0x4,
+
+ /**
+ * @PHY_CONTEXT_CMD:
+ * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd
+ * or &struct iwl_phy_context_cmd_v1.
+ */
+ PHY_CONTEXT_CMD = 0x8,
+
+ /**
+ * @DBG_CFG: Debug configuration command.
+ */
+ DBG_CFG = 0x9,
+
+ /**
+ * @SCAN_ITERATION_COMPLETE_UMAC:
+ * Firmware indicates a scan iteration completed, using
+ * &struct iwl_umac_scan_iter_complete_notif.
+ */
+ SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
+
+ /**
+ * @SCAN_CFG_CMD:
+ * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2
+ * or &struct iwl_scan_config
+ */
+ SCAN_CFG_CMD = 0xc,
+
+ /**
+ * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac
+ */
+ SCAN_REQ_UMAC = 0xd,
+
+ /**
+ * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort
+ */
+ SCAN_ABORT_UMAC = 0xe,
+
+ /**
+ * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
+ */
+ SCAN_COMPLETE_UMAC = 0xf,
+
+ /**
+ * @BA_WINDOW_STATUS_NOTIFICATION_ID:
+ * uses &struct iwl_ba_window_status_notif
+ */
+ BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+
+ /**
+ * @ADD_STA_KEY:
+ * &struct iwl_mvm_add_sta_key_cmd_v1 or
+ * &struct iwl_mvm_add_sta_key_cmd.
+ */
+ ADD_STA_KEY = 0x17,
+
+ /**
+ * @ADD_STA:
+ * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
+ */
+ ADD_STA = 0x18,
+
+ /**
+ * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
+ */
+ REMOVE_STA = 0x19,
+
+ /**
+ * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd
+ */
+ FW_GET_ITEM_CMD = 0x1a,
+
+ /**
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+ * &struct iwl_tx_cmd_gen3,
+ * response in &struct iwl_mvm_tx_resp or
+ * &struct iwl_mvm_tx_resp_v3
+ */
+ TX_CMD = 0x1c,
+
+ /**
+ * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+ */
+ TXPATH_FLUSH = 0x1e,
+
+ /**
+ * @MGMT_MCAST_KEY:
+ * &struct iwl_mvm_mgmt_mcast_key_cmd or
+ * &struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ */
+ MGMT_MCAST_KEY = 0x1f,
+
+ /* scheduler config */
+ /**
+ * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
+ * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
+ * for newer (22000) hardware.
+ */
+ SCD_QUEUE_CFG = 0x1d,
+
+ /**
+ * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd
+ */
+ WEP_KEY = 0x20,
+
+ /**
+ * @SHARED_MEM_CFG:
+ * retrieve shared memory configuration - response in
+ * &struct iwl_shared_mem_cfg
+ */
+ SHARED_MEM_CFG = 0x25,
+
+ /**
+ * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd
+ */
+ TDLS_CHANNEL_SWITCH_CMD = 0x27,
+
+ /**
+ * @TDLS_CHANNEL_SWITCH_NOTIFICATION:
+ * uses &struct iwl_tdls_channel_switch_notif
+ */
+ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
+
+ /**
+ * @TDLS_CONFIG_CMD:
+ * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res
+ */
+ TDLS_CONFIG_CMD = 0xa7,
+
+ /**
+ * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
+ */
+ MAC_CONTEXT_CMD = 0x28,
+
+ /**
+ * @TIME_EVENT_CMD:
+ * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
+ */
+ TIME_EVENT_CMD = 0x29, /* both CMD and response */
+
+ /**
+ * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
+ */
+ TIME_EVENT_NOTIFICATION = 0x2a,
+
+ /**
+ * @BINDING_CONTEXT_CMD:
+ * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
+ */
+ BINDING_CONTEXT_CMD = 0x2b,
+
+ /**
+ * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
+ */
+ TIME_QUOTA_CMD = 0x2c,
+
+ /**
+ * @NON_QOS_TX_COUNTER_CMD:
+ * command is &struct iwl_nonqos_seq_query_cmd
+ */
+ NON_QOS_TX_COUNTER_CMD = 0x2d,
+
+ /**
+ * @LEDS_CMD: command is &struct iwl_led_cmd
+ */
+ LEDS_CMD = 0x48,
+
+ /**
+ * @LQ_CMD: using &struct iwl_lq_cmd
+ */
+ LQ_CMD = 0x4e,
+
+ /**
+ * @FW_PAGING_BLOCK_CMD:
+ * &struct iwl_fw_paging_cmd
+ */
+ FW_PAGING_BLOCK_CMD = 0x4f,
+
+ /**
+ * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac
+ */
+ SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+
+ /**
+ * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents
+ */
+ SCAN_OFFLOAD_ABORT_CMD = 0x52,
+
+ /**
+ * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req
+ */
+ HOT_SPOT_CMD = 0x53,
+
+ /**
+ * @SCAN_OFFLOAD_COMPLETE:
+ * notification, &struct iwl_periodic_scan_complete
+ */
+ SCAN_OFFLOAD_COMPLETE = 0x6D,
+
+ /**
+ * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD:
+ * update scan offload (scheduled scan) profiles/blocklist/etc.
+ */
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+
+ /**
+ * @MATCH_FOUND_NOTIFICATION: scan match found
+ */
+ MATCH_FOUND_NOTIFICATION = 0xd9,
+
+ /**
+ * @SCAN_ITERATION_COMPLETE:
+ * uses &struct iwl_lmac_scan_complete_notif
+ */
+ SCAN_ITERATION_COMPLETE = 0xe7,
+
+ /* Phy */
+ /**
+ * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd_v1 or &struct iwl_phy_cfg_cmd_v3
+ */
+ PHY_CONFIGURATION_CMD = 0x6a,
+
+ /**
+ * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db
+ */
+ CALIB_RES_NOTIF_PHY_DB = 0x6b,
+
+ /**
+ * @PHY_DB_CMD: &struct iwl_phy_db_cmd
+ */
+ PHY_DB_CMD = 0x6c,
+
+ /**
+ * @POWER_TABLE_CMD: &struct iwl_device_power_cmd
+ */
+ POWER_TABLE_CMD = 0x77,
+
+ /**
+ * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION:
+ * &struct iwl_uapsd_misbehaving_ap_notif
+ */
+ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+
+ /**
+ * @LTR_CONFIG: &struct iwl_ltr_config_cmd
+ */
+ LTR_CONFIG = 0xee,
+
+ /**
+ * @REPLY_THERMAL_MNG_BACKOFF:
+ * Thermal throttling command
+ */
+ REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+
+ /**
+ * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
+ */
+ NVM_ACCESS_CMD = 0x88,
+
+ /**
+ * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif
+ */
+ BEACON_NOTIFICATION = 0x90,
+
+ /**
+ * @BEACON_TEMPLATE_CMD:
+ * Uses one of &struct iwl_mac_beacon_cmd_v6,
+ * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
+ * depending on the device version.
+ */
+ BEACON_TEMPLATE_CMD = 0x91,
+ /**
+ * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
+ */
+ TX_ANT_CONFIGURATION_CMD = 0x98,
+
+ /**
+ * @STATISTICS_CMD:
+ * one of &struct iwl_statistics_cmd,
+ * &struct iwl_notif_statistics_v11,
+ * &struct iwl_notif_statistics_v10,
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy_ver_14
+ */
+ STATISTICS_CMD = 0x9c,
+
+ /**
+ * @STATISTICS_NOTIFICATION:
+ * one of &struct iwl_notif_statistics_v10,
+ * &struct iwl_notif_statistics_v11,
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy_ver_14 or
+ * &struct iwl_statistics_operational_ntfy
+ */
+ STATISTICS_NOTIFICATION = 0x9d,
+
+ /**
+ * @EOSP_NOTIFICATION:
+ * Notify that a service period ended,
+ * &struct iwl_mvm_eosp_notification
+ */
+ EOSP_NOTIFICATION = 0x9e,
+
+ /**
+ * @REDUCE_TX_POWER_CMD:
+ * &struct iwl_dev_tx_power_cmd
+ */
+ REDUCE_TX_POWER_CMD = 0x9f,
+
+ /**
+ * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
+ */
+ MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+ /**
+ * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
+ */
+ MAC_PM_POWER_TABLE = 0xa9,
+
+ /**
+ * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
+ */
+ MFUART_LOAD_NOTIFICATION = 0xb1,
+
+ /**
+ * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
+ */
+ RSS_CONFIG_CMD = 0xb3,
+
+ /**
+ * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
+ */
+ REPLY_RX_PHY_CMD = 0xc0,
+
+ /**
+ * @REPLY_RX_MPDU_CMD:
+ * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
+ */
+ REPLY_RX_MPDU_CMD = 0xc1,
+
+ /**
+ * @BAR_FRAME_RELEASE: Frame release from BAR notification, used for
+ * multi-TID BAR (previously, the BAR frame itself was reported
+ * instead). Uses &struct iwl_bar_frame_release.
+ */
+ BAR_FRAME_RELEASE = 0xc2,
+
+ /**
+ * @FRAME_RELEASE:
+ * Frame release (reorder helper) notification, uses
+ * &struct iwl_frame_release
+ */
+ FRAME_RELEASE = 0xc3,
+
+ /**
+ * @BA_NOTIF:
+ * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
+ * or &struct iwl_mvm_ba_notif depending on the HW
+ */
+ BA_NOTIF = 0xc5,
+
+ /* Location Aware Regulatory */
+ /**
+ * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
+ */
+ MCC_UPDATE_CMD = 0xc8,
+
+ /**
+ * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
+ */
+ MCC_CHUB_UPDATE_CMD = 0xc9,
+
+ /**
+ * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
+ * with &struct iwl_mvm_marker_rsp
+ */
+ MARKER_CMD = 0xcb,
+
+ /**
+ * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+ */
+ BT_PROFILE_NOTIFICATION = 0xce,
+
+ /**
+ * @BT_CONFIG: &struct iwl_bt_coex_cmd
+ */
+ BT_CONFIG = 0x9b,
+
+ /**
+ * @BT_COEX_UPDATE_REDUCED_TXP:
+ * &struct iwl_bt_coex_reduced_txp_update_cmd
+ */
+ BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
+
+ /**
+ * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
+ */
+ BT_COEX_CI = 0x5d,
+
+ /**
+ * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
+ */
+ REPLY_SF_CFG_CMD = 0xd1,
+ /**
+ * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
+ */
+ REPLY_BEACON_FILTERING_CMD = 0xd2,
+
+ /**
+ * @DTS_MEASUREMENT_NOTIFICATION:
+ * &struct iwl_dts_measurement_notif_v1 or
+ * &struct iwl_dts_measurement_notif_v2
+ */
+ DTS_MEASUREMENT_NOTIFICATION = 0xdd,
+
+ /**
+ * @LDBG_CONFIG_CMD: configure continuous trace recording
+ */
+ LDBG_CONFIG_CMD = 0xf6,
+
+ /**
+ * @DEBUG_LOG_MSG: Debugging log data from firmware
+ */
+ DEBUG_LOG_MSG = 0xf7,
+
+ /**
+ * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
+ */
+ MCAST_FILTER_CMD = 0xd0,
+
+ /**
+ * @D3_CONFIG_CMD: &struct iwl_d3_manager_config
+ */
+ D3_CONFIG_CMD = 0xd3,
+
+ /**
+ * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of
+ * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2,
+ * &struct iwl_proto_offload_cmd_v3_small,
+ * &struct iwl_proto_offload_cmd_v3_large
+ */
+ PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+
+ /**
+ * @OFFLOADS_QUERY_CMD:
+ * No data in command, response in &struct iwl_wowlan_status
+ */
+ OFFLOADS_QUERY_CMD = 0xd5,
+
+ /**
+ * @D0I3_END_CMD: End D0i3/D3 state, no command data
+ */
+ D0I3_END_CMD = 0xed,
+
+ /**
+ * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
+ */
+ WOWLAN_PATTERNS = 0xe0,
+
+ /**
+ * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
+ */
+ WOWLAN_CONFIGURATION = 0xe1,
+
+ /**
+ * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4,
+ * &struct iwl_wowlan_rsc_tsc_params_cmd
+ */
+ WOWLAN_TSC_RSC_PARAM = 0xe2,
+
+ /**
+ * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
+ */
+ WOWLAN_TKIP_PARAM = 0xe3,
+
+ /**
+ * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
+ */
+ WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+
+ /**
+ * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
+ */
+ WOWLAN_GET_STATUSES = 0xe5,
+
+ /**
+ * @SCAN_OFFLOAD_PROFILES_QUERY_CMD:
+ * No command data, response is &struct iwl_scan_offload_profiles_query
+ */
+ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
+};
+
+/**
+ * enum iwl_system_subcmd_ids - system group command IDs
+ */
+enum iwl_system_subcmd_ids {
+ /**
+ * @SHARED_MEM_CFG_CMD:
+ * response in &struct iwl_shared_mem_cfg or
+ * &struct iwl_shared_mem_cfg_v2
+ */
+ SHARED_MEM_CFG_CMD = 0x0,
+
+ /**
+ * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd
+ */
+ SOC_CONFIGURATION_CMD = 0x01,
+
+ /**
+ * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
+ */
+ INIT_EXTENDED_CFG_CMD = 0x03,
+
+ /**
+ * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd
+ */
+ FW_ERROR_RECOVERY_CMD = 0x7,
+
+ /**
+ * @RFI_CONFIG_CMD: &struct iwl_rfi_config_cmd
+ */
+ RFI_CONFIG_CMD = 0xb,
+
+ /**
+ * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
+ */
+ RFI_GET_FREQ_TABLE_CMD = 0xc,
+
+ /**
+ * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
+ */
+ SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+ /**
+ * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+ */
+ RFI_DEACTIVATE_NOTIF = 0xff,
+};
+
+#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
new file mode 100644
index 000000000..087354b3c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_config_h__
+#define __iwl_fw_api_config_h__
+
+/*
+ * struct iwl_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwl_dqa_enable_cmd {
+ __le32 cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/*
+ * struct iwl_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwl_tx_ant_cfg_cmd {
+ __le32 valid;
+} __packed;
+
+/**
+ * struct iwl_calib_ctrl - Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers, using &enum iwl_calib_cfg
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers, using &enum iwl_calib_cfg
+ */
+struct iwl_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_calib_cfg {
+ IWL_CALIB_CFG_XTAL_IDX = BIT(0),
+ IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
+ IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
+ IWL_CALIB_CFG_PAPD_IDX = BIT(3),
+ IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
+ IWL_CALIB_CFG_DC_IDX = BIT(5),
+ IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
+ IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
+ IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
+ IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
+ IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
+ IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
+ IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
+ IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
+ IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
+ IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
+ IWL_CALIB_CFG_DAC_IDX = BIT(16),
+ IWL_CALIB_CFG_ABS_IDX = BIT(17),
+ IWL_CALIB_CFG_AGC_IDX = BIT(18),
+};
+
+/**
+ * struct iwl_phy_specific_cfg - specific PHY filter configuration
+ *
+ * Sent as part of the phy configuration command (v3) to configure specific FW
+ * defined PHY filters that can be applied to each antenna.
+ *
+ * @filter_cfg_chain_a: filter config id for LMAC1 chain A
+ * @filter_cfg_chain_b: filter config id for LMAC1 chain B
+ * @filter_cfg_chain_c: filter config id for LMAC2 chain A
+ * @filter_cfg_chain_d: filter config id for LMAC2 chain B
+ * values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id
+ */
+struct iwl_phy_specific_cfg {
+ __le32 filter_cfg_chain_a;
+ __le32 filter_cfg_chain_b;
+ __le32 filter_cfg_chain_c;
+ __le32 filter_cfg_chain_d;
+} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
+
+/**
+ * struct iwl_phy_cfg_cmd_v1 - Phy configuration command (v1)
+ *
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
+ */
+struct iwl_phy_cfg_cmd_v1 {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+} __packed;
+
+/**
+ * struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3)
+ *
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
+ * @phy_specific_cfg: configure predefined PHY filters
+ */
+struct iwl_phy_cfg_cmd_v3 {
+ __le32 phy_cfg;
+ struct iwl_calib_ctrl calib_control;
+ struct iwl_phy_specific_cfg phy_specific_cfg;
+} __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */
+
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+ DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
+ DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+#endif /* __iwl_fw_api_config_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
new file mode 100644
index 000000000..105ba7170
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_context_h__
+#define __iwl_fw_api_context_h__
+
+/**
+ * enum iwl_ctxt_id_and_color - ID and color fields in context dword
+ * @FW_CTXT_ID_POS: position of the ID
+ * @FW_CTXT_ID_MSK: mask of the ID
+ * @FW_CTXT_COLOR_POS: position of the color
+ * @FW_CTXT_COLOR_MSK: mask of the color
+ * @FW_CTXT_INVALID: value used to indicate unused/invalid
+ */
+enum iwl_ctxt_id_and_color {
+ FW_CTXT_ID_POS = 0,
+ FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
+ FW_CTXT_COLOR_POS = 8,
+ FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
+ FW_CTXT_INVALID = 0xffffffff,
+};
+
+#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\
+ ((_color) << FW_CTXT_COLOR_POS))
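+
+/*
+ * Illustrative sketch (not part of the firmware API): packing and unpacking
+ * the id/color dword with the definitions above; the id and color values
+ * here are arbitrary examples.
+ *
+ *	u32 id_and_color = FW_CMD_ID_AND_COLOR(3, 1);
+ *	u32 id = (id_and_color & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ *	u32 color = (id_and_color & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS;
+ */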
+
+/* Possible actions on PHYs, MACs and Bindings */
+enum iwl_ctxt_action {
+ FW_CTXT_ACTION_STUB = 0,
+ FW_CTXT_ACTION_ADD,
+ FW_CTXT_ACTION_MODIFY,
+ FW_CTXT_ACTION_REMOVE,
+ FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+#endif /* __iwl_fw_api_context_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
new file mode 100644
index 000000000..8a613e150
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -0,0 +1,863 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_d3_h__
+#define __iwl_fw_api_d3_h__
+#include <iwl-trans.h>
+
+/**
+ * enum iwl_d0i3_flags - d0i3 flags
+ * @IWL_D0I3_RESET_REQUIRE: FW require reset upon resume
+ */
+enum iwl_d0i3_flags {
+ IWL_D0I3_RESET_REQUIRE = BIT(0),
+};
+
+/**
+ * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
+ * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ */
+enum iwl_d3_wakeup_flags {
+ IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
+
+/**
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ * @wakeup_host_timer: force wakeup after this many seconds
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+struct iwl_d3_manager_config {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+ __le32 wakeup_host_timer;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
+
+
+/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+
+/**
+ * enum iwl_proto_offloads - enabled protocol offloads
+ * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
+ * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
+ * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
+ * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid
+ */
+enum iwl_proto_offloads {
+ IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+ IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+ IWL_D3_PROTO_IPV4_VALID = BIT(2),
+ IWL_D3_PROTO_IPV6_VALID = BIT(3),
+};
+
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
+
+/**
+ * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
+ * @enabled: enable flags
+ * @remote_ipv4_addr: remote address to answer to (or zero if all)
+ * @host_ipv4_addr: our IPv4 address to respond to queries for
+ * @arp_mac_addr: our MAC address for ARP responses
+ * @reserved: unused
+ */
+struct iwl_proto_offload_cmd_common {
+ __le32 enabled;
+ __be32 remote_ipv4_addr;
+ __be32 host_ipv4_addr;
+ u8 arp_mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed;
+
+/**
+ * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ * @reserved2: reserved
+ */
+struct iwl_proto_offload_cmd_v1 {
+ struct iwl_proto_offload_cmd_common common;
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+
+/**
+ * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @reserved2: reserved
+ */
+struct iwl_proto_offload_cmd_v2 {
+ struct iwl_proto_offload_cmd_common common;
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ u8 num_valid_ipv6_addrs;
+ u8 reserved2[3];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+
+struct iwl_ns_config {
+ struct in6_addr source_ipv6_addr;
+ struct in6_addr dest_ipv6_addr;
+ u8 target_mac_addr[ETH_ALEN];
+ __le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+ struct in6_addr addr;
+ __le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
+ * @sta_id: station id
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v4 {
+ __le32 sta_id;
+ struct iwl_proto_offload_cmd_common common;
+ __le32 num_valid_ipv6_addrs;
+ struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+ struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */
+
+/*
+ * WOWLAN_PATTERNS
+ */
+#define IWL_WOWLAN_MIN_PATTERN_LEN 16
+#define IWL_WOWLAN_MAX_PATTERN_LEN 128
+
+struct iwl_wowlan_pattern_v1 {
+ u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+ u8 mask_size;
+ u8 pattern_size;
+ __le16 reserved;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
+
+#define IWL_WOWLAN_MAX_PATTERNS 20
+
+/**
+ * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns (version 1)
+ */
+struct iwl_wowlan_patterns_cmd_v1 {
+ /**
+ * @n_patterns: number of patterns
+ */
+ __le32 n_patterns;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
+ struct iwl_wowlan_pattern_v1 patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+
+#define IPV4_ADDR_SIZE 4
+#define IPV6_ADDR_SIZE 16
+
+enum iwl_wowlan_pattern_type {
+ WOWLAN_PATTERN_TYPE_BITMASK,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD,
+}; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv4_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv6_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */
+
+/**
+ * union iwl_wowlan_pattern_data - Data for the different pattern types
+ *
+ * If wildcard addresses/ports are to be used, the union can be left
+ * undefined.
+ */
+union iwl_wowlan_pattern_data {
+ /**
+ * @bitmask: bitmask pattern data
+ */
+ struct iwl_wowlan_pattern_v1 bitmask;
+
+ /**
+ * @ipv4_tcp_syn: IPv4 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn;
+
+ /**
+ * @ipv6_tcp_syn: IPv6 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn;
+}; /* WOWLAN_PATTERN_API_U_VER_1 */
+
+/**
+ * struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns
+ */
+struct iwl_wowlan_pattern_v2 {
+ /**
+ * @pattern_type: defines the struct type to be used in the union
+ */
+ u8 pattern_type;
+
+ /**
+ * @reserved: reserved for alignment
+ */
+ u8 reserved[3];
+
+ /**
+ * @u: the union containing the match data, or undefined for
+ * wildcard matches
+ */
+ union iwl_wowlan_pattern_data u;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_2 */
+
+/**
+ * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command
+ */
+struct iwl_wowlan_patterns_cmd {
+ /**
+ * @n_patterns: number of patterns
+ */
+ u8 n_patterns;
+
+ /**
+	 * @sta_id: station id
+ */
+ u8 sta_id;
+
+ /**
+ * @reserved: reserved for alignment
+ */
+ __le16 reserved;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
+ struct iwl_wowlan_pattern_v2 patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */
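+
+/*
+ * Illustrative sketch (not part of the firmware API): since @patterns is a
+ * flexible array, the host command length must cover @n_patterns entries,
+ * e.g. using the kernel's struct_size() helper (n_patterns is a
+ * hypothetical local variable here):
+ *
+ *	struct iwl_wowlan_patterns_cmd *cmd;
+ *	size_t len = struct_size(cmd, patterns, n_patterns);
+ *
+ *	cmd = kzalloc(len, GFP_KERNEL);
+ *	cmd->n_patterns = n_patterns;
+ */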
+
+enum iwl_wowlan_wakeup_filters {
+ IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
+ IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
+ IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
+ IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
+ IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
+ IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
+ IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
+}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
+
+enum iwl_wowlan_flags {
+ IS_11W_ASSOC = BIT(0),
+ ENABLE_L3_FILTERING = BIT(1),
+ ENABLE_NBNS_FILTERING = BIT(2),
+ ENABLE_DHCP_FILTERING = BIT(3),
+ ENABLE_STORE_BEACON = BIT(4),
+};
+
+/**
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
+ * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
+ * @non_qos_seq: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 6.
+ * @qos_seq: QoS sequence counters to use next
+ * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
+ * @is_11n_connection: indicates HT connection
+ * @offloading_tid: TID reserved for firmware use
+ * @flags: extra flags, see &enum iwl_wowlan_flags
+ * @sta_id: station ID for wowlan.
+ * @reserved: reserved
+ */
+struct iwl_wowlan_config_cmd {
+ __le32 wakeup_filter;
+ __le16 non_qos_seq;
+ __le16 qos_seq[8];
+ u8 wowlan_ba_teardown_tids;
+ u8 is_11n_connection;
+ u8 offloading_tid;
+ u8 flags;
+ u8 sta_id;
+ u8 reserved;
+} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */
+
+#define IWL_NUM_RSC 16
+#define WOWLAN_KEY_MAX_SIZE 32
+#define WOWLAN_GTK_KEYS_NUM 2
+#define WOWLAN_IGTK_KEYS_NUM 2
+
+/*
+ * WOWLAN_TSC_RSC_PARAMS
+ */
+struct tkip_sc {
+ __le16 iv16;
+ __le16 pad;
+ __le32 iv32;
+} __packed; /* TKIP_SC_API_U_VER_1 */
+
+struct iwl_tkip_rsc_tsc {
+ struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+ struct tkip_sc tsc;
+} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
+
+struct aes_sc {
+ __le64 pn;
+} __packed; /* TKIP_AES_SC_API_U_VER_1 */
+
+struct iwl_aes_rsc_tsc {
+ struct aes_sc unicast_rsc[IWL_NUM_RSC];
+ struct aes_sc multicast_rsc[IWL_NUM_RSC];
+ struct aes_sc tsc;
+} __packed; /* AES_TSC_RSC_API_S_VER_1 */
+
+union iwl_all_tsc_rsc {
+ struct iwl_tkip_rsc_tsc tkip;
+ struct iwl_aes_rsc_tsc aes;
+}; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 {
+ union iwl_all_tsc_rsc all_tsc_rsc;
+} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd_v4 {
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params;
+ __le32 sta_id;
+} __packed; /* ALL_TSC_RSC_API_S_VER_4 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd {
+ __le64 ucast_rsc[IWL_MAX_TID_COUNT];
+ __le64 mcast_rsc[WOWLAN_GTK_KEYS_NUM][IWL_MAX_TID_COUNT];
+ __le32 sta_id;
+#define IWL_MCAST_KEY_MAP_INVALID 0xff
+ u8 mcast_key_id_map[4];
+} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
+
+#define IWL_MIC_KEY_SIZE 8
+struct iwl_mic_keys {
+ u8 tx[IWL_MIC_KEY_SIZE];
+ u8 rx_unicast[IWL_MIC_KEY_SIZE];
+ u8 rx_mcast[IWL_MIC_KEY_SIZE];
+} __packed; /* MIC_KEYS_API_S_VER_1 */
+
+#define IWL_P1K_SIZE 5
+struct iwl_p1k_cache {
+ __le16 p1k[IWL_P1K_SIZE];
+} __packed;
+
+#define IWL_NUM_RX_P1K_CACHE 2
+
+struct iwl_wowlan_tkip_params_cmd_ver_1 {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+
+struct iwl_wowlan_tkip_params_cmd {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+ u8 reversed[2];
+ __le32 sta_id;
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_2 */
+
+#define IWL_KCK_MAX_SIZE 32
+#define IWL_KEK_MAX_SIZE 32
+
+struct iwl_wowlan_kek_kck_material_cmd_v2 {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+
+struct iwl_wowlan_kek_kck_material_cmd_v3 {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+ __le32 akm;
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
+
+struct iwl_wowlan_kek_kck_material_cmd_v4 {
+ __le32 sta_id;
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+ __le32 akm;
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */
+
+struct iwl_wowlan_get_status_cmd {
+ __le32 sta_id;
+} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+enum iwl_wowlan_rekey_status {
+ IWL_WOWLAN_REKEY_POST_REKEY = 0,
+ IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
+
+enum iwl_wowlan_wakeup_reason {
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
+ IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
+ IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
+ IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
+ IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
+ IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
+ IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+ IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
+ IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
+ IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21),
+}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
+
+struct iwl_wowlan_gtk_status_v1 {
+ u8 key_index;
+ u8 reserved[3];
+ u8 decrypt_key[16];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
+
+/**
+ * struct iwl_wowlan_gtk_status_v2 - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @rsc: TSC RSC counters
+ */
+struct iwl_wowlan_gtk_status_v2 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 reserved[2];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
+
+/**
+ * struct iwl_wowlan_all_rsc_tsc_v5 - key counters
+ * @ucast_rsc: unicast RSC values
+ * @mcast_rsc: multicast RSC values (per key map value)
+ * @sta_id: station ID
+ * @mcast_key_id_map: map of key id to @mcast_rsc entry
+ */
+struct iwl_wowlan_all_rsc_tsc_v5 {
+ __le64 ucast_rsc[IWL_MAX_TID_COUNT];
+ __le64 mcast_rsc[2][IWL_MAX_TID_COUNT];
+ __le32 sta_id;
+ u8 mcast_key_id_map[4];
+} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
+
+/**
+ * struct iwl_wowlan_gtk_status_v3 - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status_v3 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 reserved[2];
+ u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+ struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+
+#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
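+
+/*
+ * Illustrative sketch (not part of the firmware API): extracting the
+ * AP-assigned key index from @key_flags of a GTK status entry, given a
+ * hypothetical struct iwl_wowlan_gtk_status_v3 *gtk:
+ *
+ *	u8 key_idx = gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
+ */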
+
+/**
+ * struct iwl_wowlan_igtk_status - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0]: key index assigned by the AP (0: index 4, 1: index 5)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 ipn[6];
+ u8 key_len;
+ u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
+ * struct iwl_wowlan_status_v6 - WoWLAN status
+ * @gtk: GTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v6 {
+ struct iwl_wowlan_gtk_status_v1 gtk;
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
+/**
+ * struct iwl_wowlan_status_v7 - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v7 {
+ struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
+
+/**
+ * struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 10.
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v9 {
+ struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
+
+/**
+ * struct iwl_wowlan_status_v12 - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ * Reserved if the struct has version >= 10.
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v12 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+
+/**
+ * struct iwl_wowlan_info_notif_v1 - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @reserved2: reserved
+ */
+struct iwl_wowlan_info_notif_v1 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 reserved2[2];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @reserved2: reserved
+ */
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 reserved2[2];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
+ * @wake_packet_length: wakeup packet length
+ * @station_id: station id
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_wake_pkt_notif {
+ __le32 wake_packet_length;
+ u8 station_id;
+ u8 reserved[3];
+ u8 wake_packet[1];
+} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_d3_end_notif - d3 end notification
+ * @flags: See &enum iwl_d0i3_flags
+ */
+struct iwl_mvm_d3_end_notif {
+ __le32 flags;
+} __packed;
+
+/* TODO: NetDetect API */
+
+#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
new file mode 100644
index 000000000..43619acc2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_datapath_h__
+#define __iwl_fw_api_datapath_h__
+
+/**
+ * enum iwl_data_path_subcmd_ids - data path group commands
+ */
+enum iwl_data_path_subcmd_ids {
+ /**
+ * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
+ */
+ DQA_ENABLE_CMD = 0x0,
+
+ /**
+ * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
+ */
+ UPDATE_MU_GROUPS_CMD = 0x1,
+
+ /**
+ * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
+ */
+ TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+
+ /**
+ * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+ */
+ STA_HE_CTXT_CMD = 0x7,
+
+ /**
+ * @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd
+ */
+ RLC_CONFIG_CMD = 0x8,
+
+ /**
+ * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+ */
+ RFH_QUEUE_CONFIG_CMD = 0xD,
+
+ /**
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
+ */
+ TLC_MNG_CONFIG_CMD = 0xF,
+
+ /**
+ * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
+ */
+ HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
+
+ /**
+ * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI
+ * matrix collection, uses &struct iwl_channel_estimation_cfg
+ */
+ CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
+
+ /**
+ * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+ * blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+ * command, and &struct iwl_rx_baid_cfg_resp as a response.
+ */
+ RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+ /**
+ * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+ * command, uses &struct iwl_scd_queue_cfg_cmd and the response
+ * is (same as before) &struct iwl_tx_queue_cfg_rsp.
+ */
+ SCD_QUEUE_CONFIG_CMD = 0x17,
+
+ /**
+ * @MONITOR_NOTIF: Datapath monitoring notification, using
+ * &struct iwl_datapath_monitor_notif
+ */
+ MONITOR_NOTIF = 0xF4,
+
+ /**
+ * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
+ */
+ RX_NO_DATA_NOTIF = 0xF5,
+
+ /**
+	 * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
+ * &struct iwl_thermal_dual_chain_request
+ */
+ THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
+
+ /**
+ * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+ */
+ TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+ /**
+ * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
+ */
+ STA_PM_NOTIF = 0xFD,
+
+ /**
+ * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
+ */
+ MU_GROUP_MGMT_NOTIF = 0xFE,
+
+ /**
+ * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
+ */
+ RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
+/**
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
+ *
+ * @reserved: reserved
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ *	in the group, its two-bit position is stored at bit offset (group * 2)
+ */
+struct iwl_mu_group_mgmt_cmd {
+ __le32 reserved;
+ __le32 membership_status[2];
+ __le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
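+
+/*
+ * Illustrative sketch (not part of the firmware API): @membership_status is
+ * a 64-bit bitmap over MU group ids and @user_position holds two bits per
+ * group, so for a given group id the fields might be indexed as:
+ *
+ *	bool member = le32_to_cpu(cmd->membership_status[group / 32]) &
+ *		      BIT(group % 32);
+ *	u8 pos = (le32_to_cpu(cmd->user_position[group / 16]) >>
+ *		  ((group % 16) * 2)) & 0x3;
+ */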
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ * a member of the group, the two bits starting at bit (group * 2) hold its
+ * position in that group
+ */
+struct iwl_mu_group_mgmt_notif {
+ __le32 membership_status[2];
+ __le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
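Decoding these bitmaps follows the usual VHT MU-MIMO layout of one membership bit and two position bits per group; a minimal sketch with hypothetical helper names, not part of this patch:

static inline bool iwl_mu_group_is_member(const struct iwl_mu_group_mgmt_notif *notif,
					  unsigned int group)
{
	return le32_to_cpu(notif->membership_status[group / 32]) & BIT(group % 32);
}

static inline u8 iwl_mu_group_user_position(const struct iwl_mu_group_mgmt_notif *notif,
					    unsigned int group)
{
	/* two position bits per group, i.e. 16 groups per 32-bit word */
	u32 word = le32_to_cpu(notif->user_position[group / 16]);

	return (word >> ((group % 16) * 2)) & 0x3;
}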
+
+enum iwl_channel_estimation_flags {
+ IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0),
+ IWL_CHANNEL_ESTIMATION_TIMER = BIT(1),
+ IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2),
+};
+
+/**
+ * struct iwl_channel_estimation_cfg - channel estimation reporting config
+ */
+struct iwl_channel_estimation_cfg {
+ /**
+ * @flags: flags, see &enum iwl_channel_estimation_flags
+ */
+ __le32 flags;
+ /**
+ * @timer: if enabled via flags, automatically disable after this many
+ * microseconds
+ */
+ __le32 timer;
+ /**
+ * @count: if enabled via flags, automatically disable after this many
+ * frames with channel estimation matrix were captured
+ */
+ __le32 count;
+ /**
+ * @rate_n_flags_mask: only try to record the channel estimation matrix
+ * if the rate_n_flags value for the received frame (let's call
+ * that rx_rnf) matches the mask/value given here like this:
+ * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val.
+ */
+ __le32 rate_n_flags_mask;
+ /**
+ * @rate_n_flags_val: see @rate_n_flags_mask
+ */
+ __le32 rate_n_flags_val;
+ /**
+ * @reserved: reserved (for alignment)
+ */
+ __le32 reserved;
+ /**
+ * @frame_types: bitmap of frame types to capture, the received frame's
+ * subtype|type takes 6 bits in the frame and the corresponding bit
+ * in this field must be set to 1 to capture channel estimation for
+ * that frame type. Set to all-ones to enable capturing for all
+ * frame types.
+ */
+ __le64 frame_types;
+} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
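To make the mask/value matching above concrete, a driver-side fill could look roughly like this; all values below are illustrative placeholders, not taken from this patch:

	struct iwl_channel_estimation_cfg cfg = {
		.flags = cpu_to_le32(IWL_CHANNEL_ESTIMATION_ENABLE |
				     IWL_CHANNEL_ESTIMATION_COUNTER),
		.count = cpu_to_le32(100),	/* stop after 100 captured frames */
		/* a zero mask makes every rx_rnf value match */
		.rate_n_flags_mask = cpu_to_le32(0),
		.rate_n_flags_val = cpu_to_le32(0),
		.frame_types = cpu_to_le64(~0ULL),	/* capture all frame types */
	};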
+
+enum iwl_datapath_monitor_notif_type {
+ IWL_DP_MON_NOTIF_TYPE_EXT_CCA,
+};
+
+struct iwl_datapath_monitor_notif {
+ __le32 type;
+ u8 mac_id;
+ u8 reserved[3];
+} __packed; /* MONITOR_NTF_API_S_VER_1 */
+
+/**
+ * enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
+ * @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
+ * (subject to other constraints)
+ * @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
+ * (static SMPS)
+ */
+enum iwl_thermal_dual_chain_req_events {
+ THERMAL_DUAL_CHAIN_REQ_ENABLE,
+ THERMAL_DUAL_CHAIN_REQ_DISABLE,
+}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */
+
+/**
+ * struct iwl_thermal_dual_chain_request - SMPS request
+ * @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
+ */
+struct iwl_thermal_dual_chain_request {
+ __le32 event;
+} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
+
+enum iwl_rlc_chain_info {
+ IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0),
+ IWL_RLC_CHAIN_INFO_VALID = 0x000e,
+ IWL_RLC_CHAIN_INFO_FORCE = 0x0070,
+ IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380,
+ IWL_RLC_CHAIN_INFO_COUNT = 0x0c00,
+ IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000,
+};
+
+/**
+ * struct iwl_rlc_properties - RLC properties
+ * @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info
+ * @reserved: reserved
+ */
+struct iwl_rlc_properties {
+ __le32 rx_chain_info;
+ __le32 reserved;
+} __packed; /* RLC_PROPERTIES_S_VER_1 */
+
+enum iwl_sad_mode {
+ IWL_SAD_MODE_ENABLED = BIT(0),
+ IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6,
+ IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0,
+ IWL_SAD_MODE_DEFAULT_ANT_A = 0x2,
+ IWL_SAD_MODE_DEFAULT_ANT_B = 0x4,
+};
+
+/**
+ * struct iwl_sad_properties - SAD properties
+ * @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode
+ * @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode
+ * @mac_id: MAC index
+ * @reserved: reserved
+ */
+struct iwl_sad_properties {
+ __le32 chain_a_sad_mode;
+ __le32 chain_b_sad_mode;
+ __le32 mac_id;
+ __le32 reserved;
+} __packed;
+
+/**
+ * struct iwl_rlc_config_cmd - RLC configuration
+ * @phy_id: PHY index
+ * @rlc: RLC properties, &struct iwl_rlc_properties
+ * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties
+ * @flags: flags, &enum iwl_rlc_flags
+ * @reserved: reserved
+ */
+struct iwl_rlc_config_cmd {
+ __le32 phy_id;
+ struct iwl_rlc_properties rlc;
+ struct iwl_sad_properties sad;
+ u8 flags;
+ u8 reserved[3];
+} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+
+#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+ IWL_RX_BAID_ACTION_ADD,
+ IWL_RX_BAID_ACTION_MODIFY,
+ IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+ __le32 sta_id_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le16 ssn;
+ __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+ __le32 old_sta_id_mask;
+ __le32 new_sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+ __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+ __le32 sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+ __le32 action;
+ union {
+ struct iwl_rx_baid_cfg_cmd_alloc alloc;
+ struct iwl_rx_baid_cfg_cmd_modify modify;
+ struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwl_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
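For illustration, an allocation request might be built along these lines (sta_id, tid, ssn and buf_size are placeholders); the firmware is expected to answer with &struct iwl_rx_baid_cfg_resp:

	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc = {
			.sta_id_mask = cpu_to_le32(BIT(sta_id)),
			.tid = tid,
			.ssn = cpu_to_le16(ssn),
			.win_size = cpu_to_le16(buf_size),
		},
	};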
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+ __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+ IWL_SCD_QUEUE_ADD = 0,
+ IWL_SCD_QUEUE_REMOVE = 1,
+ IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+ __le32 operation;
+ union {
+ struct {
+ __le32 sta_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le32 flags;
+ __le32 cb_size;
+ __le64 bc_dram_addr;
+ __le64 tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ __le32 queue;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ __le32 sta_mask;
+ __le32 queue;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
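A queue-add operation could be described roughly as follows; the station id, size code and DMA addresses are placeholders:

	struct iwl_scd_queue_cfg_cmd cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD),
		.u.add = {
			.sta_mask = cpu_to_le32(BIT(sta_id)),
			.tid = tid,
			.cb_size = cpu_to_le32(cb_size_code),
			.bc_dram_addr = cpu_to_le64(bc_table_dma),
			.tfdq_dram_addr = cpu_to_le64(tfd_queue_dma),
		},
	};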
+
+#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
new file mode 100644
index 000000000..39bee9c00
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2018-2022 Intel Corporation
+ */
+#ifndef __iwl_fw_dbg_tlv_h__
+#define __iwl_fw_dbg_tlv_h__
+
+#include <linux/bitops.h>
+
+#define IWL_FW_INI_MAX_REGION_ID 64
+#define IWL_FW_INI_MAX_NAME 32
+#define IWL_FW_INI_MAX_CFG_NAME 64
+#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
+#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
+#define IWL_FW_INI_PRESET_DISABLE 0xff
+
+/**
+ * struct iwl_fw_ini_hcmd
+ *
+ * @id: the debug configuration command type, for instance: 0xf6 / 0xf5 / DHC
+ * @group: the desired cmd group
+ * @reserved: to align to FW struct
+ * @data: all of the relevant command data to be sent
+ */
+struct iwl_fw_ini_hcmd {
+ u8 id;
+ u8 group;
+ __le16 reserved;
+ u8 data[];
+} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures
+ *
+ * @version: TLV version
+ * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
+ */
+struct iwl_fw_ini_header {
+ __le32 version;
+ __le32 domain;
+ /* followed by the data */
+} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
+ *
+ * @size: size of each memory chunk
+ * @offset: offset to add to the base address of each chunk
+ */
+struct iwl_fw_ini_region_dev_addr {
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos
+ *
+ * @fid: fifos ids array. Used to determine what fifos to collect
+ * @hdr_only: if non-zero, collect only the registers
+ * @offset: offset to add to the registers addresses
+ */
+struct iwl_fw_ini_region_fifos {
+ __le32 fid[2];
+ __le32 hdr_only;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_err_table - error table region data
+ *
+ * Configuration to read Umac/Lmac error table
+ *
+ * @version: version of the error table
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
+ */
+struct iwl_fw_ini_region_err_table {
+ __le32 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_special_device_memory - special device memory
+ *
+ * Configuration to read a special memory
+ *
+ * @type: type of the special memory
+ * @version: version of the special memory
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
+ */
+struct iwl_fw_ini_region_special_device_memory {
+ __le16 type;
+ __le16 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_SPECIAL_DEVICE_ADDR_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_internal_buffer - internal buffer region data
+ *
+ * Configuration to read internal monitor buffer
+ *
+ * @alloc_id: allocation id one of &enum iwl_fw_ini_allocation_id
+ * @base_addr: internal buffer base address
+ * @size: internal buffer size
+ */
+struct iwl_fw_ini_region_internal_buffer {
+ __le32 alloc_id;
+ __le32 base_addr;
+ __le32 size;
+} __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_tlv - region TLV
+ *
+ * Configures parameters for region data collection
+ *
+ * @hdr: debug header
+ * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @type: region type. One of &enum iwl_fw_ini_region_type
+ * @sub_type: region sub type
+ * @sub_type_ver: region sub type version
+ * @reserved: not in use
+ * @name: region name
+ * @dev_addr: device address configuration. Used by
+ * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
+ * &IWL_FW_INI_REGION_DRAM_IMR, &IWL_FW_INI_REGION_PCI_IOSF_CONFIG and
+ * &IWL_FW_INI_REGION_DBGI_SRAM
+ * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
+ * &IWL_FW_INI_REGION_RXF
+ * @err_table: error table configuration. Used by
+ * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * @internal_buffer: internal monitor buffer configuration. Used by
+ * &IWL_FW_INI_REGION_INTERNAL_BUFFER
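+ * @special_mem: special device memory configuration. Used by
+ * &IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY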
+ * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
+ * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * @addrs: array of addresses attached to the end of the region tlv
+ */
+struct iwl_fw_ini_region_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 id;
+ u8 type;
+ u8 sub_type;
+ u8 sub_type_ver;
+ u8 reserved;
+ u8 name[IWL_FW_INI_MAX_NAME];
+ union {
+ struct iwl_fw_ini_region_dev_addr dev_addr;
+ struct iwl_fw_ini_region_fifos fifos;
+ struct iwl_fw_ini_region_err_table err_table;
+ struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ struct iwl_fw_ini_region_special_device_memory special_mem;
+ __le32 dram_alloc_id;
+ __le32 tlv_mask;
+ }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
+ __le32 addrs[];
+} __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */
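Since @id packs both the region id and a dump policy (see IWL_FW_INI_REGION_ID_MASK and IWL_FW_INI_REGION_DUMP_POLICY_MASK above), the two halves can be split out with helpers along these lines (hypothetical names):

static inline u32 iwl_fw_ini_region_id(const struct iwl_fw_ini_region_tlv *reg)
{
	return le32_to_cpu(reg->id) & IWL_FW_INI_REGION_ID_MASK;
}

static inline u32 iwl_fw_ini_region_dump_policy(const struct iwl_fw_ini_region_tlv *reg)
{
	return (le32_to_cpu(reg->id) & IWL_FW_INI_REGION_DUMP_POLICY_MASK) >> 16;
}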
+
+/**
+ * struct iwl_fw_ini_debug_info_tlv - debug information TLV
+ *
+ * debug configuration name for a specific image
+ *
+ * @hdr: debug header
+ * @image_type: image type
+ * @debug_cfg_name: debug configuration name
+ */
+struct iwl_fw_ini_debug_info_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 image_type;
+ u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers
+ *
+ * @hdr: debug header
+ * @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id
+ * @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location
+ * @req_size: requested buffer size
+ * @max_frags_num: maximum number of fragments
+ * @min_size: minimum buffer size
+ */
+struct iwl_fw_ini_allocation_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 alloc_id;
+ __le32 buf_location;
+ __le32 req_size;
+ __le32 max_frags_num;
+ __le32 min_size;
+} __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_trigger_tlv - trigger TLV
+ *
+ * Trigger that upon firing, determines what regions to collect
+ *
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @trigger_reason: trigger reason
+ * @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy
+ * @dump_delay: delay from trigger fire to dump, in usec
+ * @occurrences: max trigger fire occurrences allowed
+ * @reserved: unused
+ * @ignore_consec: ignore consecutive triggers, in usec
+ * @reset_fw: if non zero, will reset and reload the FW
+ * @multi_dut: initiate debug dump data on several DUTs
+ * @regions_mask: mask of regions to collect
+ * @data: trigger data
+ */
+struct iwl_fw_ini_trigger_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 trigger_reason;
+ __le32 apply_policy;
+ __le32 dump_delay;
+ __le32 occurrences;
+ __le32 reserved;
+ __le32 ignore_consec;
+ __le32 reset_fw;
+ __le32 multi_dut;
+ __le64 regions_mask;
+ __le32 data[];
+} __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV
+ *
+ * @hdr: debug header
+ * @time_point: time point. One of &enum iwl_fw_ini_time_point
+ * @period_msec: interval at which the hcmd will be sent to the FW.
+ * Measured in msec (0 = one time command)
+ * @hcmd: a variable length host-command to be sent to apply the configuration
+ */
+struct iwl_fw_ini_hcmd_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 period_msec;
+ struct iwl_fw_ini_hcmd hcmd;
+} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_addr_val - Address and value to set it to
+ *
+ * @address: the base address
+ * @value: value to set at address
+ */
+struct iwl_fw_ini_addr_val {
+ __le32 address;
+ __le32 value;
+} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
+
+/**
+ * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory.
+ *
+ * @hdr: debug header
+ * @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point
+ * @set_type: write access type preset token for time point.
+ * one of &enum iwl_fw_ini_config_set_type
+ * @addr_offset: the offset to add to any item in address[0] field
+ * @addr_val: address value pair
+ */
+struct iwl_fw_ini_conf_set_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 set_type;
+ __le32 addr_offset;
+ struct iwl_fw_ini_addr_val addr_val[];
+} __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */
+
+/**
+ * enum iwl_fw_ini_config_set_type
+ *
+ * @IWL_FW_INI_CONFIG_SET_TYPE_INVALID: invalid config set
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: for PERIPHERY MAC configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY: for PERIPHERY PHY configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX: for PERIPHERY AUX configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: for DEVICE MEMORY configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: maximum number of configuration set types
+ */
+enum iwl_fw_ini_config_set_type {
+ IWL_FW_INI_CONFIG_SET_TYPE_INVALID = 0,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY,
+ IWL_FW_INI_CONFIG_SET_TYPE_CSR,
+ IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR,
+ IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM,
+ IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM,
+} __packed;
+
+/**
+ * enum iwl_fw_ini_allocation_id
+ *
+ * @IWL_FW_INI_ALLOCATION_INVALID: invalid
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration
+ * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
+*/
+enum iwl_fw_ini_allocation_id {
+ IWL_FW_INI_ALLOCATION_INVALID,
+ IWL_FW_INI_ALLOCATION_ID_DBGC1,
+ IWL_FW_INI_ALLOCATION_ID_DBGC2,
+ IWL_FW_INI_ALLOCATION_ID_DBGC3,
+ IWL_FW_INI_ALLOCATION_ID_DBGC4,
+ IWL_FW_INI_ALLOCATION_NUM,
+}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_buffer_location
+ *
+ * @IWL_FW_INI_LOCATION_INVALID: invalid
+ * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
+ * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
+ * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
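+ * @IWL_FW_INI_LOCATION_NUM: number of buffer locations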
+ */
+enum iwl_fw_ini_buffer_location {
+ IWL_FW_INI_LOCATION_INVALID,
+ IWL_FW_INI_LOCATION_SRAM_PATH,
+ IWL_FW_INI_LOCATION_DRAM_PATH,
+ IWL_FW_INI_LOCATION_NPK_PATH,
+ IWL_FW_INI_LOCATION_NUM,
+}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_region_type
+ *
+ * @IWL_FW_INI_REGION_INVALID: invalid
+ * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs
+ * @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer
+ * @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer
+ * @IWL_FW_INI_REGION_TXF: TX fifos
+ * @IWL_FW_INI_REGION_RXF: RX fifo
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
+ * @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data
+ * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
+ * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
+ * @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
+ * @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
+ * @IWL_FW_INI_REGION_PAGING: paging memory
+ * @IWL_FW_INI_REGION_CSR: CSR registers
+ * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
+ * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
+ * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory
+ * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM
+ * @IWL_FW_INI_REGION_NUM: number of region types
+ */
+enum iwl_fw_ini_region_type {
+ IWL_FW_INI_REGION_INVALID,
+ IWL_FW_INI_REGION_TLV,
+ IWL_FW_INI_REGION_INTERNAL_BUFFER,
+ IWL_FW_INI_REGION_DRAM_BUFFER,
+ IWL_FW_INI_REGION_TXF,
+ IWL_FW_INI_REGION_RXF,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_RSP_OR_NOTIF,
+ IWL_FW_INI_REGION_DEVICE_MEMORY,
+ IWL_FW_INI_REGION_PERIPHERY_MAC,
+ IWL_FW_INI_REGION_PERIPHERY_PHY,
+ IWL_FW_INI_REGION_PERIPHERY_AUX,
+ IWL_FW_INI_REGION_PAGING,
+ IWL_FW_INI_REGION_CSR,
+ IWL_FW_INI_REGION_DRAM_IMR,
+ IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
+ IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY,
+ IWL_FW_INI_REGION_DBGI_SRAM,
+ IWL_FW_INI_REGION_NUM
+}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
+
+enum iwl_fw_ini_region_device_memory_subtype {
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18,
+ IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20,
+}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */
+
+/**
+ * enum iwl_fw_ini_time_point
+ *
+ * Hard coded time points in which the driver can send hcmd or perform dump
+ * collection
+ *
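+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid time point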
+ * @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
+ * @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
+ * @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
+ * @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert
+ * @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error
+ * @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang
+ * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: DHC cmd response and notif
+ * @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification.
+ * data field holds id and group
+ * @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point
+ * @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant
+ * intervals. data field holds the interval time in msec
+ * @IWL_FW_INI_TIME_POINT_RESERVED: reserved
+ * @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused
+ * @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout
+ * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable
+ * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE: device disable
+ * @IWL_FW_INI_TIME_POINT_HOST_D3_START: D3 start
+ * @IWL_FW_INI_TIME_POINT_HOST_D3_END: D3 end
+ * @IWL_FW_INI_TIME_POINT_MISSED_BEACONS: missed beacons
+ * @IWL_FW_INI_TIME_POINT_ASSOC_FAILED: association failure
+ * @IWL_FW_INI_TIME_POINT_TX_FAILED: Tx frame failed
+ * @IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED: wifi direct action
+ * frame failed
+ * @IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD: Tx latency threshold
+ * @IWL_FW_INI_TIME_POINT_HANG_OCCURRED: hang occurred
+ * @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed
+ * @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx
+ * @IWL_FW_INI_TIME_POINT_DEASSOC: disassociation
+ * @IWL_FW_INI_TIME_POINT_NUM: number of time points
+ */
+enum iwl_fw_ini_time_point {
+ IWL_FW_INI_TIME_POINT_INVALID,
+ IWL_FW_INI_TIME_POINT_EARLY,
+ IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+ IWL_FW_INI_TIME_POINT_POST_INIT,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ IWL_FW_INI_TIME_POINT_FW_HW_ERROR,
+ IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG,
+ IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION,
+ IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF,
+ IWL_FW_INI_TIME_POINT_USER_TRIGGER,
+ IWL_FW_INI_TIME_POINT_PERIODIC,
+ IWL_FW_INI_TIME_POINT_RESERVED,
+ IWL_FW_INI_TIME_POINT_HOST_ASSERT,
+ IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE,
+ IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
+ IWL_FW_INI_TIME_POINT_HOST_D3_START,
+ IWL_FW_INI_TIME_POINT_HOST_D3_END,
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS,
+ IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
+ IWL_FW_INI_TIME_POINT_TX_FAILED,
+ IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED,
+ IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD,
+ IWL_FW_INI_TIME_POINT_HANG_OCCURRED,
+ IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ IWL_FW_INI_TIME_POINT_DEASSOC,
+ IWL_FW_INI_TIME_POINT_NUM,
+}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
+
+/**
+ * enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers
+ *
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point
+ * @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask.
+ * Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
+ * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
+ * Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
+ */
+enum iwl_fw_ini_trigger_apply_policy {
+ IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
+ IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
+ IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+ IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
+};
+
+/**
+ * enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset
+ *
+ * @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop and reload the FW (default)
+ * @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop the FW without reloading it
+ * @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop and reload the FW
+ */
+enum iwl_fw_ini_trigger_reset_fw_policy {
+ IWL_FW_INI_RESET_FW_MODE_NOTHING = 0,
+ IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
+ IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
+};
+
+/**
+ * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
+ *
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: the OS places no limit on the dump size
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limited to 600KB of region dumps
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limited to 5MB of region dumps
+ */
+enum iwl_fw_ini_dump_policy {
+ IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
+ IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
+};
+
+/**
+ * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
+ *
+ * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_VERBOSE: dump all regions
+ */
+enum iwl_fw_ini_dump_type {
+ IWL_FW_INI_DUMP_BRIEF,
+ IWL_FW_INI_DUMP_MEDIUM,
+ IWL_FW_INI_DUMP_VERBOSE,
+};
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
new file mode 100644
index 000000000..0c555089e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_debug_h__
+#define __iwl_fw_api_debug_h__
+
+/**
+ * enum iwl_debug_cmds - debug commands
+ */
+enum iwl_debug_cmds {
+ /**
+ * @LMAC_RD_WR:
+ * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+ * &struct iwl_dbg_mem_access_rsp
+ */
+ LMAC_RD_WR = 0x0,
+ /**
+ * @UMAC_RD_WR:
+ * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+ * &struct iwl_dbg_mem_access_rsp
+ */
+ UMAC_RD_WR = 0x1,
+ /**
+ * @HOST_EVENT_CFG:
+ * updates the enabled event severities
+ * &struct iwl_dbg_host_event_cfg_cmd
+ */
+ HOST_EVENT_CFG = 0x3,
+ /**
+ * @DBGC_SUSPEND_RESUME:
+ * DBGC suspend/resume command. Uses a single dword as data:
+ * 0 - resume DBGC recording
+ * 1 - suspend DBGC recording
+ */
+ DBGC_SUSPEND_RESUME = 0x7,
+ /**
+ * @BUFFER_ALLOCATION:
+ * passes DRAM buffers to a DBGC
+ * &struct iwl_buf_alloc_cmd
+ */
+ BUFFER_ALLOCATION = 0x8,
+ /**
+ * @FW_DUMP_COMPLETE_CMD:
+ * sends command to fw once dump collection completed
+ * &struct iwl_dbg_dump_complete_cmd
+ */
+ FW_DUMP_COMPLETE_CMD = 0xB,
+ /**
+ * @MFU_ASSERT_DUMP_NTF:
+ * &struct iwl_mfu_assert_dump_notif
+ */
+ MFU_ASSERT_DUMP_NTF = 0xFE,
+};
+
+/* Error response/notification */
+enum {
+ FW_ERR_UNKNOWN_CMD = 0x0,
+ FW_ERR_INVALID_CMD_PARAM = 0x1,
+ FW_ERR_SERVICE = 0x2,
+ FW_ERR_ARC_MEMORY = 0x3,
+ FW_ERR_ARC_CODE = 0x4,
+ FW_ERR_WATCH_DOG = 0x5,
+ FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+ FW_ERR_WEP_KEY_SIZE = 0x11,
+ FW_ERR_OBSOLETE_FUNC = 0x12,
+ FW_ERR_UNEXPECTED = 0xFE,
+ FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * enum iwl_dbg_suspend_resume_cmds - DBGC suspend/resume command operations
+ * @DBGC_RESUME_CMD: resume dbgc recording
+ * @DBGC_SUSPEND_CMD: stop dbgc recording
+ */
+enum iwl_dbg_suspend_resume_cmds {
+ DBGC_RESUME_CMD,
+ DBGC_SUSPEND_CMD,
+};
+
+/**
+ * struct iwl_error_resp - FW error indication
+ * ( REPLY_ERROR = 0x2 )
+ * @error_type: one of FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @reserved1: reserved
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwl_error_resp {
+ __le32 error_type;
+ u8 cmd_id;
+ u8 reserved1;
+ __le16 bad_cmd_seq_num;
+ __le32 error_service;
+ __le64 timestamp;
+} __packed;
+
+#define TX_FIFO_MAX_NUM_9000 8
+#define TX_FIFO_MAX_NUM 15
+#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_INTERNAL_MAX_NUM 6
+
+/**
+ * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
+ * accessible)
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
+ * 0x0 as accessible only via DBGM RDAT)
+ * @sample_buff_size: internal sample buff size
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
+ * 8000 HW set to 0x0 as not accessible)
+ * @txfifo_size: size of TXF0 ... TXF7
+ * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ * when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ * set, the last 3 members don't exist.
+ */
+struct iwl_shared_mem_cfg_v2 {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo1_addr;
+ __le32 rxfifo1_size;
+
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ * when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC SMEM data
+ * @rxfifo2_control_addr: start addr of RXF2C
+ * @rxfifo2_control_size: size of RXF2C
+ */
+struct iwl_shared_mem_cfg {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 rxfifo2_addr;
+ __le32 rxfifo2_size;
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+ __le32 lmac_num;
+ struct iwl_shared_mem_lmac_cfg lmac_smem[3];
+ __le32 rxfifo2_control_addr;
+ __le32 rxfifo2_control_size;
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
+
+/**
+ * struct iwl_mfuart_load_notif_v1 - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+*/
+struct iwl_mfuart_load_notif_v1 {
+ __le32 installed_ver;
+ __le32 external_ver;
+ __le32 status;
+ __le32 duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mfuart_load_notif - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ * @image_size: MFUART image size in bytes
+*/
+struct iwl_mfuart_load_notif {
+ __le32 installed_ver;
+ __le32 external_ver;
+ __le32 status;
+ __le32 duration;
+ /* image size valid only in v2 of the command */
+ __le32 image_size;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that caused the notif
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+ __le32 assert_id;
+ __le32 curr_reset_num;
+ __le16 index_num;
+ __le16 parts_num;
+ __le32 data_size;
+ __le32 data[];
+} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_marker_id - marker ids
+ *
+ * The ids for different type of markers to insert into the usniffer logs
+ *
+ * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
+ */
+enum iwl_mvm_marker_id {
+ MARKER_ID_TX_FRAME_LATENCY = 1,
+ MARKER_ID_SYNC_CLOCK = 2,
+}; /* MARKER_ID_API_E_VER_2 */
+
+/**
+ * struct iwl_mvm_marker - mark info into the usniffer logs
+ *
+ * (MARKER_CMD = 0xcb)
+ *
+ * Mark the UTC time stamp into the usniffer logs together with additional
+ * metadata, so the usniffer output can be parsed.
+ * In the command response the ucode will return the GP2 time.
+ *
+ * @dw_len: The number of dwords following this byte, including this byte.
+ * @marker_id: A unique marker id (iwl_mvm_marker_id).
+ * @reserved: reserved.
+ * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
+ * @metadata: additional metadata that will be written to the usniffer log
+ */
+struct iwl_mvm_marker {
+ u8 dw_len;
+ u8 marker_id;
+ __le16 reserved;
+ __le64 timestamp;
+ __le32 metadata[];
+} __packed; /* MARKER_API_S_VER_1 */
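The @dw_len bookkeeping (total length in dwords) can be handled as sketched below for a clock-sync marker without metadata; the timestamp source here is only an example:

	struct iwl_mvm_marker marker = {
		.dw_len = sizeof(marker) / sizeof(__le32),	/* 3 dwords, no metadata */
		.marker_id = MARKER_ID_SYNC_CLOCK,
		/* milliseconds since the UNIX epoch, as documented above */
		.timestamp = cpu_to_le64(ktime_get_real_ns() / NSEC_PER_MSEC),
	};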
+
+/**
+ * struct iwl_mvm_marker_rsp - Response to marker cmd
+ *
+ * @gp2: The gp2 clock value in the FW
+ */
+struct iwl_mvm_marker_rsp {
+ __le32 gp2;
+} __packed;
+
+/* Operation types for the debug mem access */
+enum {
+ DEBUG_MEM_OP_READ = 0,
+ DEBUG_MEM_OP_WRITE = 1,
+ DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+ __le32 op;
+ __le32 addr;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
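A read request, for instance, could be filled roughly like this (addr and num_dwords are placeholders):

	struct iwl_dbg_mem_access_cmd cmd = {
		.op = cpu_to_le32(DEBUG_MEM_OP_READ),
		.addr = cpu_to_le32(addr),
		/* must not exceed DEBUG_MEM_MAX_SIZE_DWORDS */
		.len = cpu_to_le32(num_dwords),
	};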
+
+/* Status responses for the debug mem access */
+enum {
+ DEBUG_MEM_STATUS_SUCCESS = 0x0,
+ DEBUG_MEM_STATUS_FAILED = 0x1,
+ DEBUG_MEM_STATUS_LOCKED = 0x2,
+ DEBUG_MEM_STATUS_HIDDEN = 0x3,
+ DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+ __le32 status;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_dbg_suspend_resume_cmd - dbgc suspend resume command
+ * @operation: suspend or resume operation, uses
+ * &enum iwl_dbg_suspend_resume_cmds
+ */
+struct iwl_dbg_suspend_resume_cmd {
+ __le32 operation;
+} __packed;
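Matching the single-dword encoding noted for @DBGC_SUSPEND_RESUME, a suspend request would roughly be:

	struct iwl_dbg_suspend_resume_cmd cmd = {
		.operation = cpu_to_le32(DBGC_SUSPEND_CMD),
	};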
+
+#define BUF_ALLOC_MAX_NUM_FRAGS 16
+
+/**
+ * struct iwl_buf_alloc_frag - a DBGC fragment
+ * @addr: base address of the fragment
+ * @size: size of the fragment
+ */
+struct iwl_buf_alloc_frag {
+ __le64 addr;
+ __le32 size;
+} __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */
+
+/**
+ * struct iwl_buf_alloc_cmd - buffer allocation command
+ * @alloc_id: &enum iwl_fw_ini_allocation_id
+ * @buf_location: &enum iwl_fw_ini_buffer_location
+ * @num_frags: number of fragments
+ * @frags: fragments array
+ */
+struct iwl_buf_alloc_cmd {
+ __le32 alloc_id;
+ __le32 buf_location;
+ __le32 num_frags;
+ struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS];
+} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */
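A single-fragment DRAM allocation, as a sketch (the DMA address and size are placeholders; the referenced enums live in api/dbg-tlv.h):

	struct iwl_buf_alloc_cmd cmd = {
		.alloc_id = cpu_to_le32(IWL_FW_INI_ALLOCATION_ID_DBGC1),
		.buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		.num_frags = cpu_to_le32(1),
		.frags[0] = {
			.addr = cpu_to_le64(frag_dma_addr),
			.size = cpu_to_le32(frag_size),
		},
	};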
+
+#define DRAM_INFO_FIRST_MAGIC_WORD 0x76543210
+#define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF
+
+/**
+ * struct iwl_dram_info - DRAM fragments allocation struct
+ *
+ * The driver will fill in the first 1K(+) of the pointed-to DRAM fragment
+ *
+ * @first_word: magic word value
+ * @second_word: magic word value
+ * @dram_frags: DRAM fragmentation details
+ */
+struct iwl_dram_info {
+ __le32 first_word;
+ __le32 second_word;
+ struct iwl_buf_alloc_cmd dram_frags[IWL_FW_INI_ALLOCATION_NUM - 1];
+} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */
+
+/**
+ * struct iwl_dbgc1_info - DBGC1 address and size
+ *
+ * The driver will fill in the DBGC1 address and size at an address based on the config TLV.
+ *
+ * @first_word: all 0 set as identifier
+ * @dbgc1_add_lsb: LSB bits of DBGC1 physical address
+ * @dbgc1_add_msb: MSB bits of DBGC1 physical address
+ * @dbgc1_size: DBGC1 size
+*/
+struct iwl_dbgc1_info {
+ __le32 first_word;
+ __le32 dbgc1_add_lsb;
+ __le32 dbgc1_add_msb;
+ __le32 dbgc1_size;
+} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */
+
+/**
+ * struct iwl_dbg_host_event_cfg_cmd - host event config command
+ * @enabled_severities: enabled severities
+ */
+struct iwl_dbg_host_event_cfg_cmd {
+ __le32 enabled_severities;
+} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dbg_dump_complete_cmd - dump complete cmd
+ *
+ * @tp: timepoint whose dump has completed
+ * @tp_data: timepoint data
+ */
+struct iwl_dbg_dump_complete_cmd {
+ __le32 tp;
+ __le32 tp_data;
+} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_debug_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
new file mode 100644
index 000000000..88fe61d14
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_filter_h__
+#define __iwl_fw_api_filter_h__
+
+#include "fw/api/mac.h"
+
+#define MAX_PORT_ID_NUM 2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwl_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a somewhat
+ * unusual way of identifying the network interface, adopted in the
+ * host-device interface: the FW uses it as an index into an array of
+ * address lists, which has MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @reserved: reserved
+ * @addr_list: Placeholder for the array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+ u8 filter_own;
+ u8 port_id;
+ u8 count;
+ u8 pass_all;
+ u8 bssid[6];
+ u8 reserved[2];
+ u8 addr_list[];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
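The DWORD-alignment requirement on @addr_list translates into sizing the command like this (n_addrs is a placeholder):

	size_t cmd_len = sizeof(struct iwl_mcast_filter_cmd) +
			 ALIGN(n_addrs * ETH_ALEN, 4);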
+
+#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h
new file mode 100644
index 000000000..475bb4640
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_led_h__
+#define __iwl_fw_api_led_h__
+
+/**
+ * struct iwl_led_cmd - LED switching command
+ *
+ * @status: LED status (on/off)
+ */
+struct iwl_led_cmd {
+ __le32 status;
+} __packed; /* LEDS_CMD_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_led_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
new file mode 100644
index 000000000..12af94e16
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -0,0 +1,1694 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2021 Intel Corporation
+ */
+#ifndef __iwl_fw_api_location_h__
+#define __iwl_fw_api_location_h__
+
+/**
+ * enum iwl_location_subcmd_ids - location group command IDs
+ */
+enum iwl_location_subcmd_ids {
+ /**
+ * @TOF_RANGE_REQ_CMD: TOF ranging request,
+ * uses one of &struct iwl_tof_range_req_cmd_v5,
+ * &struct iwl_tof_range_req_cmd_v7,
+ * &struct iwl_tof_range_req_cmd_v8,
+ * &struct iwl_tof_range_req_cmd_v9,
+ * &struct iwl_tof_range_req_cmd_v11
+ */
+ TOF_RANGE_REQ_CMD = 0x0,
+ /**
+ * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd
+ */
+ TOF_CONFIG_CMD = 0x1,
+ /**
+ * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses
+ * &struct iwl_tof_range_abort_cmd
+ */
+ TOF_RANGE_ABORT_CMD = 0x2,
+ /**
+ * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config,
+ * uses &struct iwl_tof_range_req_ext_cmd
+ */
+ TOF_RANGE_REQ_EXT_CMD = 0x3,
+ /**
+ * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration,
+ * uses &struct iwl_tof_responder_config_cmd
+ */
+ TOF_RESPONDER_CONFIG_CMD = 0x4,
+ /**
+ * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration,
+ * uses &struct iwl_tof_responder_dyn_config_cmd
+ */
+ TOF_RESPONDER_DYN_CONFIG_CMD = 0x5,
+ /**
+ * @CSI_HEADER_NOTIFICATION: CSI header
+ */
+ CSI_HEADER_NOTIFICATION = 0xFA,
+ /**
+ * @CSI_CHUNKS_NOTIFICATION: CSI chunk,
+ * uses &struct iwl_csi_chunk_notification
+ */
+ CSI_CHUNKS_NOTIFICATION = 0xFB,
+ /**
+ * @TOF_LC_NOTIF: used for LCI/civic location, contains just
+ * the action frame
+ */
+ TOF_LC_NOTIF = 0xFC,
+ /**
+ * @TOF_RESPONDER_STATS: FTM responder statistics notification,
+ * uses &struct iwl_ftm_responder_stats
+ */
+ TOF_RESPONDER_STATS = 0xFD,
+ /**
+ * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses
+ * &struct iwl_tof_mcsi_notif
+ */
+ TOF_MCSI_DEBUG_NOTIF = 0xFE,
+ /**
+ * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using
+ * &struct iwl_tof_range_rsp_ntfy
+ */
+ TOF_RANGE_RESPONSE_NOTIF = 0xFF,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: indicates if ToF is disabled (or not)
+ * @one_sided_disabled: indicates if one-sided is disabled (or not)
+ * @is_debug_mode: indicates if debug mode is active
+ * @is_buf_required: indicates if channel estimation buffer is required
+ */
+struct iwl_tof_config_cmd {
+ u8 tof_disabled;
+ u8 one_sided_disabled;
+ u8 is_debug_mode;
+ u8 is_buf_required;
+} __packed;
+
+/**
+ * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth
+ * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT
+ * @IWL_TOF_BW_20_HT: 20 MHz HT
+ * @IWL_TOF_BW_40: 40 MHz
+ * @IWL_TOF_BW_80: 80 MHz
+ * @IWL_TOF_BW_160: 160 MHz
+ * @IWL_TOF_BW_NUM: number of tof bandwidths
+ */
+enum iwl_tof_bandwidth {
+ IWL_TOF_BW_20_LEGACY,
+ IWL_TOF_BW_20_HT,
+ IWL_TOF_BW_40,
+ IWL_TOF_BW_80,
+ IWL_TOF_BW_160,
+ IWL_TOF_BW_NUM,
+}; /* LOCAT_BW_TYPE_E */
+
+/*
+ * enum iwl_tof_algo_type - Algorithm type for range measurement request
+ */
+enum iwl_tof_algo_type {
+ IWL_TOF_ALGO_TYPE_MAX_LIKE = 0,
+ IWL_TOF_ALGO_TYPE_LINEAR_REG = 1,
+ IWL_TOF_ALGO_TYPE_FFT = 2,
+
+ /* Keep last */
+ IWL_TOF_ALGO_TYPE_INVALID,
+}; /* ALGO_TYPE_E */
+
+/*
+ * enum iwl_tof_mcsi_enable - Enable/Disable MCSI notifications
+ */
+enum iwl_tof_mcsi_enable {
+ IWL_TOF_MCSI_DISABLED = 0,
+ IWL_TOF_MCSI_ENABLED = 1,
+}; /* MCSI_ENABLE_E */
+
+/**
+ * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg
+ * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: specific calibration mode is
+ * valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report
+ * support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT: enable/disable NDP ranging support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID: session id flag is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR: the bss_color field is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR: the
+ * min_time_between_msr and max_time_between_msr fields are valid
+ */
+enum iwl_tof_responder_cmd_valid_field {
+ IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
+ IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1),
+ IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2),
+ IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3),
+ IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4),
+ IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5),
+ IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6),
+ IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7),
+ IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8),
+ IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10),
+ IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11),
+ IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12),
+ IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22),
+ IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23),
+ IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24),
+ IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = BIT(25),
+ IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = BIT(26),
+ IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = BIT(27),
+};
+
+/**
+ * enum iwl_tof_responder_cfg_flags - responder configuration flags
+ * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI
+ * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type
+ * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode
+ * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support
+ * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail
+ * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask
+ * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging
+ * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the
+ * initiator supports it
+ * @IWL_TOF_RESPONDER_FLAGS_SESSION_ID: send the session id in the initial FTM
+ * frame.
+ */
+enum iwl_tof_responder_cfg_flags {
+ IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2),
+ IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5),
+ IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6),
+ IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7),
+ IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8),
+ IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
+ IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_AB_MSK,
+ IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24),
+ IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25),
+ IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27),
+};
+
+/**
+ * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd_v6 {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 bandwidth;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_tof_responder_config_cmd_v7 - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd_v7 {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */
+
+#define IWL_RESPONDER_STS_POS 3
+#define IWL_RESPONDER_TOTAL_LTF_POS 6
+
+/**
+ * struct iwl_tof_responder_config_cmd_v8 - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ */
+struct iwl_tof_responder_config_cmd_v8 {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
+
+/**
+ * struct iwl_tof_responder_config_cmd_v9 - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @bss_color: current AP bss_color
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @min_time_between_msr: for non trigger based NDP ranging, minimum time
+ * between measurements in milliseconds.
+ * @max_time_between_msr: for non trigger based NDP ranging, maximum time
+ * between measurements in milliseconds.
+ */
+struct iwl_tof_responder_config_cmd_v9 {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 bss_color;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+ __le16 max_time_between_msr;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
+
+#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings
+ * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
+ * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
+ * @lci_civic: The LCI/CIVIC buffer. The LCI data (if present) comes first,
+ * followed by 0-padding (if needed) so that the CIVIC data starts on a
+ * dword boundary, then the CIVIC data (if present), and finally 0-padding
+ * again so that the total buffer length is a multiple of 4.
+ */
+struct iwl_tof_responder_dyn_config_cmd_v2 {
+ __le32 lci_len;
+ __le32 civic_len;
+ u8 lci_civic[];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
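+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API): the
+ * variable length @lci_civic buffer above holds the LCI data, padding up to a
+ * dword boundary, the CIVIC data, and padding up to a dword boundary again.
+ * A hypothetical helper computing the total command size could look like:
+ *
+ *	static size_t example_dyn_cfg_v2_size(u32 lci_len, u32 civic_len)
+ *	{
+ *		return sizeof(struct iwl_tof_responder_dyn_config_cmd_v2) +
+ *		       ALIGN(lci_len, 4) + ALIGN(civic_len, 4);
+ *	}
+ */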
+
+#define IWL_LCI_MAX_SIZE 160
+#define IWL_CIVIC_MAX_SIZE 160
+#define HLTK_11AZ_LEN 32
+
+/**
+ * enum iwl_responder_dyn_cfg_valid_flags - valid flags for dyn_config_cmd
+ * @IWL_RESPONDER_DYN_CFG_VALID_LCI: LCI data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_CIVIC: Civic data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_PASN_STA: the pasn_addr, HLTK and cipher fields
+ * are valid.
+ */
+enum iwl_responder_dyn_cfg_valid_flags {
+ IWL_RESPONDER_DYN_CFG_VALID_LCI = BIT(0),
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC = BIT(1),
+ IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = BIT(2),
+};
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * @cipher: The negotiated cipher. see &enum iwl_location_cipher.
+ * @valid_flags: flags indicating which fields in the command are valid. see
+ * &enum iwl_responder_dyn_cfg_valid_flags.
+ * @lci_len: length of the LCI data in bytes
+ * @civic_len: length of the Civic data in bytes
+ * @lci_buf: the LCI buffer
+ * @civic_buf: the Civic buffer
+ * @hltk_buf: HLTK for secure LTF bits generation for the specified station
+ * @addr: mac address of the station for which to use the HLTK
+ * @reserved: for alignment
+ */
+struct iwl_tof_responder_dyn_config_cmd {
+ u8 cipher;
+ u8 valid_flags;
+ u8 lci_len;
+ u8 civic_len;
+ u8 lci_buf[IWL_LCI_MAX_SIZE];
+ u8 civic_buf[IWL_LCI_MAX_SIZE];
+ u8 hltk_buf[HLTK_11AZ_LEN];
+ u8 addr[ETH_ALEN];
+ u8 reserved[2];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_3 */
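+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * configuring a PASN station with the fixed-size v3 command above.  The
+ * choice of cipher (defined further below in this header) and the origin of
+ * the addr/hltk buffers are assumptions; only the struct layout comes from
+ * this header.
+ *
+ *	static void example_fill_pasn_sta(struct iwl_tof_responder_dyn_config_cmd *cmd,
+ *					  const u8 *addr, const u8 *hltk)
+ *	{
+ *		cmd->valid_flags = IWL_RESPONDER_DYN_CFG_VALID_PASN_STA;
+ *		cmd->cipher = IWL_LOCATION_CIPHER_GCMP_256;
+ *		memcpy(cmd->addr, addr, ETH_ALEN);
+ *		memcpy(cmd->hltk_buf, hltk, HLTK_11AZ_LEN);
+ *	}
+ */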
+
+/**
+ * struct iwl_tof_range_req_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @reserved: reserved
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ * in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ * value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+ __le16 tsf_timer_offset_msec;
+ __le16 reserved;
+ u8 min_delta_ftm;
+ u8 ftm_format_and_bw20M;
+ u8 ftm_format_and_bw40M;
+ u8 ftm_format_and_bw80M;
+} __packed;
+
+/**
+ * enum iwl_tof_location_query - values for query bitmap
+ * @IWL_TOF_LOC_LCI: query LCI
+ * @IWL_TOF_LOC_CIVIC: query civic
+ */
+enum iwl_tof_location_query {
+ IWL_TOF_LOC_LCI = 0x01,
+ IWL_TOF_LOC_CIVIC = 0x02,
+};
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v2 - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth.
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
+ * number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31);
+ * 1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ * in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ * The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR;
+ * Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ * 0: Initiator interacts with a regular AP;
+ * 1: Initiator interacts with a Responder machine: the Initiator needs to
+ * send its Acks with HT 40MHz / 80MHz, since the Responder should use
+ * them for its channel estimation measurement (this flag will be set when
+ * we configure the opposite machine to be a Responder).
+ * @rssi: Last received RSSI value.
+ * Legal values: -128 to 0; values above 0 (e.g. 0x7f) indicate an invalid
+ * value.
+ * @algo_type: &enum iwl_tof_algo_type
+ * @notify_mcsi: &enum iwl_tof_mcsi_ntfy.
+ * @reserved: For alignment and future use
+ */
+struct iwl_tof_range_req_ap_entry_v2 {
+ u8 channel_num;
+ u8 bandwidth;
+ u8 tsf_delta_direction;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ u8 measure_type;
+ u8 num_of_bursts;
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 retries_per_sample;
+ __le32 tsf_delta;
+ u8 location_req;
+ u8 asap_mode;
+ u8 enable_dyn_ack;
+ s8 rssi;
+ u8 algo_type;
+ u8 notify_mcsi;
+ __le16 reserved;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_2 */
+
+/**
+ * enum iwl_initiator_ap_flags - per responder FTM configuration flags
+ * @IWL_INITIATOR_AP_FLAGS_ASAP: Request for ASAP measurement.
+ * @IWL_INITIATOR_AP_FLAGS_LCI_REQUEST: Request for LCI information
+ * @IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST: Request for CIVIC information
+ * @IWL_INITIATOR_AP_FLAGS_DYN_ACK: Send HT/VHT ack for FTM frames. If not set,
+ * 20Mhz dup acks will be sent.
+ * @IWL_INITIATOR_AP_FLAGS_ALGO_LR: Use LR algo type for rtt calculation.
+ * Default algo type is ML.
+ * @IWL_INITIATOR_AP_FLAGS_ALGO_FFT: Use FFT algo type for rtt calculation.
+ * Default algo type is ML.
+ * @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the
+ * driver.
+ * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow
+ * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow
+ * @IWL_INITIATOR_AP_FLAGS_SECURED: request secure LTF measurement
+ * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback
+ * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request
+ * instead of fw internal values.
+ * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR
+ * frames with protected management frames.
+ * @IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK: terminate the session if
+ * the responder asked for LMR feedback although the initiator did not set
+ * the LMR feedback bit in the FTM request. If not set, the initiator will
+ * continue with the session and will provide the LMR feedback.
+ */
+enum iwl_initiator_ap_flags {
+ IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
+ IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = BIT(2),
+ IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = BIT(3),
+ IWL_INITIATOR_AP_FLAGS_DYN_ACK = BIT(4),
+ IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5),
+ IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6),
+ IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8),
+ IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9),
+ IWL_INITIATOR_AP_FLAGS_TB = BIT(10),
+ IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
+ IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12),
+ IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
+ IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
+ IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
+};
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @tsf_delta: not in use
+ */
+struct iwl_tof_range_req_ap_entry_v3 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 bandwidth;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ __le32 tsf_delta;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
+
+/**
+ * enum iwl_location_frame_format - location frame formats
+ * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy
+ * @IWL_LOCATION_FRAME_FORMAT_HT: HT
+ * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT
+ * @IWL_LOCATION_FRAME_FORMAT_HE: HE
+ */
+enum iwl_location_frame_format {
+ IWL_LOCATION_FRAME_FORMAT_LEGACY,
+ IWL_LOCATION_FRAME_FORMAT_HT,
+ IWL_LOCATION_FRAME_FORMAT_VHT,
+ IWL_LOCATION_FRAME_FORMAT_HE,
+};
+
+/**
+ * enum iwl_location_bw - location bandwidth selection
+ * @IWL_LOCATION_BW_20MHZ: 20MHz
+ * @IWL_LOCATION_BW_40MHZ: 40MHz
+ * @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
+ */
+enum iwl_location_bw {
+ IWL_LOCATION_BW_20MHZ,
+ IWL_LOCATION_BW_40MHZ,
+ IWL_LOCATION_BW_80MHZ,
+ IWL_LOCATION_BW_160MHZ,
+};
+
+#define TK_11AZ_LEN 32
+
+#define LOCATION_BW_POS 4
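+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API): the
+ * @format_bw fields used below carry the frame format in bits 0 - 3 and the
+ * bandwidth, shifted by LOCATION_BW_POS, in bits 4 - 7.  For example:
+ *
+ *	u8 format_bw = IWL_LOCATION_FRAME_FORMAT_HE |
+ *		       (IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS);
+ */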
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v4 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ */
+struct iwl_tof_range_req_ap_entry_v4 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
+
+/**
+ * enum iwl_location_cipher - location cipher selection
+ * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
+ * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
+ * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
+ * @IWL_LOCATION_CIPHER_INVALID: security is not used.
+ * @IWL_LOCATION_CIPHER_MAX: maximum value for this enum.
+ */
+enum iwl_location_cipher {
+ IWL_LOCATION_CIPHER_CCMP_128,
+ IWL_LOCATION_CIPHER_GCMP_128,
+ IWL_LOCATION_CIPHER_GCMP_256,
+ IWL_LOCATION_CIPHER_INVALID,
+ IWL_LOCATION_CIPHER_MAX,
+};
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v6 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ */
+struct iwl_tof_range_req_ap_entry_v6 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */
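+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * filling the security related fields of the v6 entry above for a secured
+ * 11az measurement with an AP we are not associated with.  The key buffers
+ * (hltk, tk) are assumed to come from the PASN negotiation.
+ *
+ *	entry->initiator_ap_flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ *	entry->sta_id = IWL_MVM_INVALID_STA;
+ *	entry->cipher = IWL_LOCATION_CIPHER_GCMP_128;
+ *	memcpy(entry->hltk, hltk, HLTK_11AZ_LEN);
+ *	memcpy(entry->tk, tk, TK_11AZ_LEN);
+ */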
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v7 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ */
+struct iwl_tof_range_req_ap_entry_v7 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
+
+#define IWL_LOCATION_MAX_STS_POS 3
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: reserved
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: reserved
+ * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ */
+struct iwl_tof_range_req_ap_entry_v8 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ u8 r2i_max_total_ltf;
+ u8 i2r_max_total_ltf;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */
+
+/**
+ * struct iwl_tof_range_req_ap_entry_v9 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: For EDCA based ranging: Recommended value to be sent to the
+ * AP. Measurement periodicity In units of 100ms. ignored if
+ * num_of_bursts_exp = 0.
+ * For non trigger based NDP ranging, the maximum time between
+ * measurements in units of milliseconds.
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @bss_color: the BSS color of the responder. Only valid if
+ * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: reserved
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: reserved
+ * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation.
+ * One of &enum ieee80211_range_params_max_total_ltf.
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6 GHz
+ * @min_time_between_msr: For non trigger based NDP ranging, the minimum time
+ * between measurements in units of milliseconds
+ */
+struct iwl_tof_range_req_ap_entry_v9 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ u8 r2i_max_total_ltf;
+ u8 i2r_max_total_ltf;
+ u8 bss_color;
+ u8 band;
+ __le16 min_time_between_msr;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
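+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API): for
+ * non trigger based NDP ranging with the v9 entry above, @burst_period holds
+ * the maximum time between measurements and @min_time_between_msr the
+ * minimum, both in milliseconds.  The values below are examples only.
+ *
+ *	entry->initiator_ap_flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_NON_TB);
+ *	entry->min_time_between_msr = cpu_to_le16(5);
+ *	entry->burst_period = cpu_to_le16(200);
+ */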
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
+ * possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon
+ * timeout expiration
+ * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the
+ * earlier of: measurements completion / timeout
+ * expiration.
+ */
+enum iwl_tof_response_mode {
+ IWL_MVM_TOF_RESPONSE_ASAP,
+ IWL_MVM_TOF_RESPONSE_TIMEOUT,
+ IWL_MVM_TOF_RESPONSE_COMPLETE,
+};
+
+/**
+ * enum iwl_tof_initiator_flags
+ *
+ * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run
+ * the algo on ant A+B, instead of only one of them.
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM: use random mac address for FTM
+ * @IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB: use the specific calib value from
+ * the range request command
+ * @IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB: use the common calib value from the
+ * range request command
+ * @IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT: support non-asap measurements
+ */
+enum iwl_tof_initiator_flags {
+ IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6),
+ IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = BIT(7),
+ IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = BIT(15),
+ IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = BIT(16),
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20),
+}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
+
+/**
+ * struct iwl_tof_range_req_cmd_v5 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @initiator: 0- NW initiated, 1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ * '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ * This is equivalent to the session time configured to the
+ * LMAC in Initiator Request
+ * @report_policy: Partially supported in this release: the range report will
+ * be uploaded as a batch when ready or when the session is done
+ * (successfully / partially). One of &enum iwl_tof_response_mode.
+ * @reserved0: reserved
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use the default source MAC address (i.e. p2_p),
+ * '1' Use MAC address randomization according to the template and mask below
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT)
+ * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT)
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data
+ */
+struct iwl_tof_range_req_cmd_v5 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 initiator;
+ u8 one_sided_los_disable;
+ u8 req_timeout;
+ u8 report_policy;
+ u8 reserved0;
+ u8 num_of_ap;
+ u8 macaddr_random;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 ftm_rx_chains;
+ u8 ftm_tx_chains;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v7 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v3.
+ */
+struct iwl_tof_range_req_cmd_v7 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v8 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v4.
+ */
+struct iwl_tof_range_req_cmd_v8 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v9 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v6.
+ */
+struct iwl_tof_range_req_cmd_v9 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v11 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v7.
+ */
+struct iwl_tof_range_req_cmd_v11 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v12 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v8.
+ */
+struct iwl_tof_range_req_cmd_v12 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */
+
+/**
+ * struct iwl_tof_range_req_cmd_v13 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v9.
+ */
+struct iwl_tof_range_req_cmd_v13 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
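+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * building a minimal v13 range request for a single responder.  Sending the
+ * command (host command id, group, length) is driver plumbing and is not
+ * shown; the field values are examples only, and 0xff in @tsf_mac_id
+ * disables TSF reporting as documented above.
+ *
+ *	struct iwl_tof_range_req_cmd_v13 cmd = {
+ *		.request_id = 1,
+ *		.num_of_ap = 1,
+ *		.req_timeout_ms = cpu_to_le32(500),
+ *		.tsf_mac_id = cpu_to_le32(0xff),
+ *	};
+ *
+ *	memcpy(cmd.range_req_bssid, bssid, ETH_ALEN);
+ *	memcpy(cmd.ap[0].bssid, bssid, ETH_ALEN);
+ *	cmd.ap[0].channel_num = 36;
+ *	cmd.ap[0].format_bw = IWL_LOCATION_FRAME_FORMAT_HE |
+ *			      (IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS);
+ */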
+
+/*
+ * enum iwl_tof_range_request_status - status of the sent request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the
+ * request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the
+ * sent request will not be handled
+ */
+enum iwl_tof_range_request_status {
+ IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS,
+ IWL_TOF_RANGE_REQUEST_STATUS_BUSY,
+};
+
+/**
+ * enum iwl_tof_entry_status
+ *
+ * @IWL_TOF_ENTRY_SUCCESS: successful measurement.
+ * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
+ * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request.
+ * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request.
+ * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet.
+ * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no
+ * measurement was completed.
+ * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to
+ * switch from the primary channel.
+ * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM.
+ * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown
+ * reason.
+ * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid
+ * T1/T4.
+ * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame
+ * structure.
+ * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled.
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the
+ * initiator for some period, period supplied in @refusal_period.
+ * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments.
+ * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled.
+ * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder override the original
+ * parameters within the current session.
+ */
+enum iwl_tof_entry_status {
+ IWL_TOF_ENTRY_SUCCESS = 0,
+ IWL_TOF_ENTRY_GENERAL_FAILURE = 1,
+ IWL_TOF_ENTRY_NO_RESPONSE = 2,
+ IWL_TOF_ENTRY_REQUEST_REJECTED = 3,
+ IWL_TOF_ENTRY_NOT_SCHEDULED = 4,
+ IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5,
+ IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6,
+ IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7,
+ IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8,
+ IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9,
+ IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10,
+ IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11,
+ IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12,
+ IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13,
+ IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14,
+ IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15,
+}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v3 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time of the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @reserved: reserved
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calibration value that was used for this AP
+ * measurement
+ * @specific_calib: Specific calibration value that was used for this AP
+ * measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 reserved;
+ u8 refusal_period;
+ __le32 range;
+ __le32 range_variance;
+ __le32 timestamp;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time of the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calibration value that was used for this AP
+ * measurement
+ * @specific_calib: Specific calibration value that was used for this AP
+ * measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v5 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time of the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calibration value that was used for this AP
+ * measurement
+ * @specific_calib: Specific calibration value that was used for this AP
+ * measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time of the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calibration value that was used for this AP
+ * measurement
+ * @specific_calib: Specific calibration value that was used for this AP
+ * measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ * @rx_pn: the last PN used for this responder Rx in case PMF is configured in
+ * LE byte order.
+ * @tx_pn: the last PN used for this responder Tx in case PMF is configured in
+ * LE byte order.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6,
+ LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */
+
+/**
+ * enum iwl_tof_response_status - tof response status
+ *
+ * @IWL_TOF_RESPONSE_SUCCESS: successful range.
+ * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration.
+ * partial result of ranges done so far is included in the response.
+ * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command.
+ * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed.
+ */
+enum iwl_tof_response_status {
+ IWL_TOF_RESPONSE_SUCCESS = 0,
+ IWL_TOF_RESPONSE_TIMEOUT = 1,
+ IWL_TOF_RESPONSE_ABORTED = 4,
+ IWL_TOF_RESPONSE_FAILED = 5,
+}; /* LOCATION_RNG_RSP_STATUS */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy_v5 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session, one of
+ * &enum iwl_tof_response_status.
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v5 {
+ u8 request_id;
+ u8 request_status;
+ u8 last_in_batch;
+ u8 num_of_aps;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v6 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy_v7 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v7 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v8 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
+ LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
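+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * walking the per-AP results of the v8 notification above.  How the
+ * notification payload is obtained from the RX path is not shown.  Note that
+ * @rtt is reported in picoseconds.
+ *
+ *	static void example_handle_rsp_v8(const struct iwl_tof_range_rsp_ntfy_v8 *ntfy)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < ntfy->num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
+ *			const struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *ap = &ntfy->ap[i];
+ *
+ *			if (ap->measure_status != IWL_TOF_ENTRY_SUCCESS)
+ *				continue;
+ *			pr_debug("%pM: rtt %u psec\n", ap->bssid, le32_to_cpu(ap->rtt));
+ *		}
+ *	}
+ */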
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @reserved: reserved
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+ u8 token;
+ u8 role;
+ __le16 reserved;
+ u8 initiator_bssid[ETH_ALEN];
+ u8 responder_bssid[ETH_ALEN];
+ u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ * @reserved: reserved
+ */
+struct iwl_tof_range_abort_cmd {
+ u8 request_id;
+ u8 reserved[3];
+} __packed;
+
+enum ftm_responder_stats_flags {
+ FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0),
+ FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1),
+ FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2),
+ FTM_RESP_STAT_TRIGGER_DUP = BIT(3),
+ FTM_RESP_STAT_DUP = BIT(4),
+ FTM_RESP_STAT_DUP_IN_WIN = BIT(5),
+ FTM_RESP_STAT_DUP_OUT_WIN = BIT(6),
+ FTM_RESP_STAT_SCHED_SUCCESS = BIT(7),
+ FTM_RESP_STAT_ASAP_REQ = BIT(8),
+ FTM_RESP_STAT_NON_ASAP_REQ = BIT(9),
+ FTM_RESP_STAT_ASAP_RESP = BIT(10),
+ FTM_RESP_STAT_NON_ASAP_RESP = BIT(11),
+ FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12),
+ FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13),
+ FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14),
+ FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15),
+ FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16),
+ FTM_RESP_STAT_FAIL_GC = BIT(17),
+ FTM_RESP_STAT_SUCCESS = BIT(18),
+ FTM_RESP_STAT_INTEL_IE = BIT(19),
+ FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20),
+ FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21),
+ FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22),
+ FTM_RESP_STAT_PROCESS_FAIL = BIT(23),
+ FTM_RESP_STAT_ACK = BIT(24),
+ FTM_RESP_STAT_NACK = BIT(25),
+ FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26),
+ FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27),
+ FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28),
+ FTM_RESP_STAT_INITIATOR_ADDED = BIT(29),
+ FTM_RESP_STAT_ERR_LIST_FULL = BIT(30),
+ FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31),
+}; /* RESP_IND_E */
+
+/**
+ * struct iwl_ftm_responder_stats - FTM responder statistics
+ * @addr: initiator address
+ * @success_ftm: number of successful ftm frames
+ * @ftm_per_burst: num of FTM frames that were received
+ * @flags: &enum ftm_responder_stats_flags
+ * @duration: actual duration of FTM
+ * @allocated_duration: time that was allocated for this FTM session
+ * @bw: FTM request bandwidth
+ * @rate: FTM request rate
+ * @reserved: for alignment and future use
+ */
+struct iwl_ftm_responder_stats {
+ u8 addr[ETH_ALEN];
+ u8 success_ftm;
+ u8 ftm_per_burst;
+ __le32 flags;
+ __le32 duration;
+ __le32 allocated_duration;
+ u8 bw;
+ u8 rate;
+ __le16 reserved;
+} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
+
+#define IWL_CSI_MAX_EXPECTED_CHUNKS 16
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1 0x0003
+#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1 0x000c
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2 0x00ff
+#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2 0xff00
+
+struct iwl_csi_chunk_notification {
+ __le32 token;
+ __le16 seq;
+ __le16 ctl;
+ __le32 size;
+ u8 data[];
+} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1/VER_2 */
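+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API): the
+ * ctl field of the chunk notification above encodes the number of expected
+ * chunks and the index of this chunk; the mask layout depends on the
+ * notification version (VER_1 vs VER_2).  Assuming version 2:
+ *
+ *	u16 ctl = le16_to_cpu(chunk->ctl);
+ *	u8 num = ctl & IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2;
+ *	u8 idx = (ctl & IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2) >> 8;
+ */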
+
+#endif /* __iwl_fw_api_location_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
new file mode 100644
index 000000000..712532f17
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_mac_cfg_h__
+#define __iwl_fw_api_mac_cfg_h__
+
+/**
+ * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
+ */
+enum iwl_mac_conf_subcmd_ids {
+ /**
+ * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+ */
+ LOW_LATENCY_CMD = 0x3,
+ /**
+ * @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd
+ */
+ CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
+ /**
+ * @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif
+ */
+ MISSED_VAP_NOTIF = 0xFA,
+ /**
+ * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
+ */
+ SESSION_PROTECTION_CMD = 0x5,
+ /**
+ * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
+ */
+ CANCEL_CHANNEL_SWITCH_CMD = 0x6,
+
+ /**
+ * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
+ */
+ SESSION_PROTECTION_NOTIF = 0xFB,
+
+ /**
+ * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
+ */
+ PROBE_RESPONSE_DATA_NOTIF = 0xFC,
+
+ /**
+ * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
+ */
+ CHANNEL_SWITCH_START_NOTIF = 0xFF,
+
+ /**
+ * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
+ */
+ CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
+};
+
+#define IWL_P2P_NOA_DESC_COUNT (2)
+
+/**
+ * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification
+ *
+ * @id: attribute id
+ * @len_low: length low half
+ * @len_high: length high half
+ * @idx: instance of NoA timing
+ * @ctwin: GO's CT window and power save capability
+ * @desc: NoA descriptor
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_p2p_noa_attr {
+ u8 id;
+ u8 len_low;
+ u8 len_high;
+ u8 idx;
+ u8 ctwin;
+ struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT];
+ u8 reserved;
+} __packed;
+
+#define IWL_PROBE_RESP_DATA_NO_CSA (0xff)
+
+/**
+ * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter
+ *
+ * @mac_id: the mac which should send the probe response
+ * @noa_active: notifies if the noa attribute should be handled
+ * @noa_attr: P2P NOA attribute
+ * @csa_counter: current csa counter
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_probe_resp_data_notif {
+ __le32 mac_id;
+ __le32 noa_active;
+ struct iwl_p2p_noa_attr noa_attr;
+ u8 csa_counter;
+ u8 reserved[3];
+} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_missed_vap_notif - notification of missing vap detection
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @num_beacon_intervals_elapsed: beacons elapsed with no vap profile inside
+ * @profile_periodicity: beacons period to have our profile inside
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_missed_vap_notif {
+ __le32 mac_id;
+ u8 num_beacon_intervals_elapsed;
+ u8 profile_periodicity;
+ u8 reserved[2];
+} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_channel_switch_start_notif - Channel switch start notification
+ *
+ * @id_and_color: ID and color of the MAC
+ */
+struct iwl_channel_switch_start_notif {
+ __le32 id_and_color;
+} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+
+#define CS_ERR_COUNT_ERROR BIT(0)
+#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
+#define CS_ERR_LONG_TX_BLOCK BIT(2)
+#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
+
+/**
+ * struct iwl_channel_switch_error_notif - Channel switch error notification
+ *
+ * @mac_id: the mac for which the ucode sends the notification
+ * @csa_err_mask: mask of channel switch errors that can occur
+ */
+struct iwl_channel_switch_error_notif {
+ __le32 mac_id;
+ __le32 csa_err_mask;
+} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
+ *
+ * @mac_id: the mac that should cancel the channel switch
+ */
+struct iwl_cancel_channel_switch_cmd {
+ __le32 mac_id;
+} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
+
+/**
+ * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
+ *
+ * @mac_id: MAC ID for channel switch
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @tsf: beacon tsf
+ * @cs_count: channel switch count from CSA/eCSA IE
+ * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals
+ *	at the new channel after the channel switch, otherwise (N == 0) a beacon
+ *	is expected right after the channel switch.
+ * @cs_mode: 1 - quiet, 0 - otherwise
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_chan_switch_te_cmd {
+ __le32 mac_id;
+ __le32 action;
+ __le32 tsf;
+ u8 cs_count;
+ u8 cs_delayed_bcn_count;
+ u8 cs_mode;
+ u8 reserved;
+} __packed; /* MAC_CHANNEL_SWITCH_TIME_EVENT_S_VER_2 */
+
+/**
+ * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
+ *
+ * @mac_id: MAC ID to whom to apply the low-latency configurations
+ * @low_latency_rx: 1/0 to set/clear Rx low latency direction
+ * @low_latency_tx: 1/0 to set/clear Tx low latency direction
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_mac_low_latency_cmd {
+ __le32 mac_id;
+ u8 low_latency_rx;
+ u8 low_latency_tx;
+ __le16 reserved;
+} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_mac_cfg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
new file mode 100644
index 000000000..9b7caf968
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_mac_h__
+#define __iwl_fw_api_mac_h__
+
+/*
+ * The first MAC indices (starting from 0) are available to the driver,
+ * AUX indices follow - 1 for non-CDB, 2 for CDB.
+ */
+#define MAC_INDEX_AUX 4
+#define MAC_INDEX_MIN_DRIVER 0
+#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
+#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
+#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
+
+#define IWL_MVM_STATION_COUNT_MAX 16
+#define IWL_MVM_INVALID_STA 0xFF
+
+enum iwl_ac {
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VO,
+ AC_NUM,
+};
+
+/**
+ * enum iwl_mac_protection_flags - MAC context flags
+ * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwl_mac_protection_flags {
+ MAC_PROT_FLG_TGG_PROTECT = BIT(3),
+ MAC_PROT_FLG_HT_PROT = BIT(23),
+ MAC_PROT_FLG_FAT_PROT = BIT(24),
+ MAC_PROT_FLG_SELF_CTS_EN = BIT(30),
+};
+
+#define MAC_FLG_SHORT_SLOT BIT(4)
+#define MAC_FLG_SHORT_PREAMBLE BIT(5)
+
+/**
+ * enum iwl_mac_types - Supported MAC types
+ * @FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @FW_MAC_TYPE_IBSS: IBSS
+ * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @FW_MAC_TYPE_P2P_STA: P2P client
+ * @FW_MAC_TYPE_GO: P2P GO
+ * @FW_MAC_TYPE_TEST: ?
+ * @FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwl_mac_types {
+ FW_MAC_TYPE_FIRST = 1,
+ FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
+ FW_MAC_TYPE_LISTENER,
+ FW_MAC_TYPE_PIBSS,
+ FW_MAC_TYPE_IBSS,
+ FW_MAC_TYPE_BSS_STA,
+ FW_MAC_TYPE_P2P_DEVICE,
+ FW_MAC_TYPE_P2P_STA,
+ FW_MAC_TYPE_GO,
+ FW_MAC_TYPE_TEST,
+ FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
+}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_tsf_id - TSF hw timer ID
+ * @TSF_ID_A: use TSF A
+ * @TSF_ID_B: use TSF B
+ * @TSF_ID_C: use TSF C
+ * @TSF_ID_D: use TSF D
+ * @NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwl_tsf_id {
+ TSF_ID_A = 0,
+ TSF_ID_B = 1,
+ TSF_ID_C = 2,
+ TSF_ID_D = 3,
+ NUM_TSF_IDS = 4,
+}; /* TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @reserved1: reserved
+ * @dtim_interval: dtim transmit time in TU
+ * @reserved2: reserved
+ * @mcast_qid: queue ID for multicast traffic.
+ * NOTE: obsolete from VER2 and on
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ap {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 reserved1;
+ __le32 dtim_interval;
+ __le32 reserved2;
+ __le32 mcast_qid;
+ __le32 beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @reserved: reserved
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ibss {
+ __le32 beacon_time;
+ __le64 beacon_tsf;
+ __le32 bi;
+ __le32 reserved;
+ __le32 beacon_template;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_data_policy - policy of the data path for this MAC
+ * @TWT_SUPPORTED: twt is supported
+ * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to
+ * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT
+ * early termination detection.
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
+ * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
+ * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT
+ * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used
+ * during 802.1X negotiation (and allowed during 4-way-HS)
+ */
+enum iwl_mac_data_policy {
+ TWT_SUPPORTED = BIT(0),
+ MORE_DATA_ACK_SUPPORTED = BIT(1),
+ FLEXIBLE_TWT_SUPPORTED = BIT(2),
+ PROTECTED_TWT_SUPPORTED = BIT(3),
+ BROADCAST_TWT_SUPPORTED = BIT(4),
+ COEX_HIGH_PRIORITY_ENABLE = BIT(5),
+};
+
+/**
+ * struct iwl_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @reserved1: reserved
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @data_policy: see &enum iwl_mac_data_policy
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ * @assoc_beacon_arrive_time: TSF of first beacon after association
+ */
+struct iwl_mac_data_sta {
+ __le32 is_assoc;
+ __le32 dtim_time;
+ __le64 dtim_tsf;
+ __le32 bi;
+ __le32 reserved1;
+ __le32 dtim_interval;
+ __le32 data_policy;
+ __le32 listen_interval;
+ __le32 assoc_id;
+ __le32 assoc_beacon_arrive_time;
+} __packed; /* STA_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwl_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwl_mac_data_go {
+ struct iwl_mac_data_ap ap;
+ __le32 ctwin;
+ __le32 opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwl_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwl_mac_data_p2p_sta {
+ struct iwl_mac_data_sta sta;
+ __le32 ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwl_mac_data_pibss {
+ __le32 stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ *	other channels as well. This should be set to true only when the
+ *	device is discoverable and there is an active GO. Note that setting this
+ *	field when not needed will increase the number of interrupts and affect
+ *	platform power, as this setting opens the Rx filters on all MACs.
+ */
+struct iwl_mac_data_p2p_dev {
+ __le32 is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_filter_flags - MAC context filter flags
+ * @MAC_FILTER_IN_PROMISC: accept all data frames
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwl_mac_filter_flags {
+ MAC_FILTER_IN_PROMISC = BIT(0),
+ MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1),
+ MAC_FILTER_ACCEPT_GRP = BIT(2),
+ MAC_FILTER_DIS_DECRYPT = BIT(3),
+ MAC_FILTER_DIS_GRP_DECRYPT = BIT(4),
+ MAC_FILTER_IN_BEACON = BIT(6),
+ MAC_FILTER_OUT_BCAST = BIT(8),
+ MAC_FILTER_IN_CRC32 = BIT(11),
+ MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+ /**
+ * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+ */
+ MAC_FILTER_IN_11AX = BIT(14),
+};
+
+/**
+ * enum iwl_mac_qos_flags - QoS flags
+ * @MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @MAC_QOS_FLG_TGN: HT is enabled
+ * @MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwl_mac_qos_flags {
+ MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
+ MAC_QOS_FLG_TGN = BIT(1),
+ MAC_QOS_FLG_TXOP_TYPE = BIT(4),
+};
+
+/**
+ * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwl_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifsn;
+ u8 fifos_mask;
+ __le16 edca_txop;
+} __packed; /* AC_QOS_API_S_VER_2 */
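+
+/*
+ * Illustrative sketch, not part of the firmware API: the contention
+ * window update described above (new CW = (2 * CW) + 1 per retry,
+ * capped by ANDing with cw_max) could be modelled on the host like
+ * this; the helper name is hypothetical.
+ */
+static inline u16 iwl_ac_qos_next_cw(u16 cw, u16 cw_max)
+{
+	/* double the window, then cap it with the cw_max bit mask */
+	return ((2 * cw) + 1) & cw_max;
+}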
+
+/**
+ * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @mac_type: one of &enum iwl_mac_types
+ * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id
+ * @node_addr: MAC address
+ * @reserved_for_node_addr: reserved
+ * @bssid_addr: BSSID
+ * @reserved_for_bssid_addr: reserved
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of &enum iwl_mac_protection_flags
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of &enum iwl_mac_filter_flags
+ * @qos_flags: from &enum iwl_mac_qos_flags
+ * @ac: one iwl_mac_qos configuration for each AC
+ */
+struct iwl_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ __le32 mac_type;
+ __le32 tsf_id;
+ u8 node_addr[6];
+ __le16 reserved_for_node_addr;
+ u8 bssid_addr[6];
+ __le16 reserved_for_bssid_addr;
+ __le32 cck_rates;
+ __le32 ofdm_rates;
+ __le32 protection_flags;
+ __le32 cck_short_preamble;
+ __le32 short_slot;
+ __le32 filter_flags;
+ /* MAC_QOS_PARAM_API_S_VER_1 */
+ __le32 qos_flags;
+ struct iwl_ac_qos ac[AC_NUM+1];
+ /* MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwl_mac_data_ap ap;
+ struct iwl_mac_data_go go;
+ struct iwl_mac_data_sta sta;
+ struct iwl_mac_data_p2p_sta p2p_sta;
+ struct iwl_mac_data_p2p_dev p2p_dev;
+ struct iwl_mac_data_pibss pibss;
+ struct iwl_mac_data_ibss ibss;
+ };
+} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
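+
+/*
+ * Illustrative sketch, not part of the firmware API: per the field
+ * descriptions above, short preamble and short slot are encoded as the
+ * fixed values 0x20 and 0x10 (MAC_FLG_SHORT_PREAMBLE/MAC_FLG_SHORT_SLOT)
+ * rather than plain booleans. A hypothetical helper applying ERP
+ * settings might look like this.
+ */
+static inline void iwl_mac_ctx_set_erp(struct iwl_mac_ctx_cmd *cmd,
+				       bool short_preamble, bool short_slot)
+{
+	cmd->cck_short_preamble =
+		cpu_to_le32(short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0);
+	cmd->short_slot = cpu_to_le32(short_slot ? MAC_FLG_SHORT_SLOT : 0);
+}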
+
+#define IWL_NONQOS_SEQ_GET 0x1
+#define IWL_NONQOS_SEQ_SET 0x2
+struct iwl_nonqos_seq_query_cmd {
+ __le32 get_set_flag;
+ __le32 mac_id_n_color;
+ __le16 value;
+ __le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
+/**
+ * struct iwl_missed_beacons_notif - information on missed beacons
+ * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons: number of expected beacons
+ * @num_recvd_beacons: number of received beacons
+ */
+struct iwl_missed_beacons_notif {
+ __le32 mac_id;
+ __le32 consec_missed_beacons_since_last_rx;
+ __le32 consec_missed_beacons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * One instance per trigger-based AC (set by the MU EDCA Parameter Set
+ * info element), used for backoff configuration of the trigger-based
+ * TXF5..TXF8 FIFOs.
+ * The MU-TIMER is reloaded with MU_TIME each time a frame from the AC is sent
+ * via trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ *	AIFSN=0 means that no backoff from the specified trigger-based AC is
+ *	allowed until the MU-TIMER reaches 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+ __le16 cwmin;
+ __le16 cwmax;
+ __le16 aifsn;
+ __le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
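+
+/*
+ * Illustrative sketch, not part of the firmware API: @mu_time is given
+ * in units of 8 TU (one TU being 1024 usec), so a hypothetical helper
+ * converting it to microseconds would be:
+ */
+static inline u32 iwl_mu_time_to_usec(const struct iwl_he_backoff_conf *conf)
+{
+	return le16_to_cpu(conf->mu_time) * 8 * 1024;
+}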
+
+/**
+ * enum iwl_he_pkt_ext_constellations - PPE constellation indices
+ * @IWL_HE_PKT_EXT_BPSK: BPSK
+ * @IWL_HE_PKT_EXT_QPSK: QPSK
+ * @IWL_HE_PKT_EXT_16QAM: 16-QAM
+ * @IWL_HE_PKT_EXT_64QAM: 64-QAM
+ * @IWL_HE_PKT_EXT_256QAM: 256-QAM
+ * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM
+ * @IWL_HE_PKT_EXT_RESERVED: reserved value
+ * @IWL_HE_PKT_EXT_NONE: not defined
+ */
+enum iwl_he_pkt_ext_constellations {
+ IWL_HE_PKT_EXT_BPSK = 0,
+ IWL_HE_PKT_EXT_QPSK,
+ IWL_HE_PKT_EXT_16QAM,
+ IWL_HE_PKT_EXT_64QAM,
+ IWL_HE_PKT_EXT_256QAM,
+ IWL_HE_PKT_EXT_1024QAM,
+ IWL_HE_PKT_EXT_RESERVED,
+ IWL_HE_PKT_EXT_NONE,
+};
+
+#define MAX_HE_SUPP_NSS 2
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+
+/**
+ * struct iwl_he_pkt_ext_v1 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/Bw define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ *	For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v1 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
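+
+/*
+ * Illustrative sketch, not part of the firmware API: the threshold
+ * comparison described above (0/8/16 us of packet extension depending
+ * on where the TX constellation falls relative to the two thresholds)
+ * could be expressed as follows; the helper name is hypothetical.
+ */
+static inline u8 iwl_he_ppe_usec(u8 qam_tx, u8 qam_th1, u8 qam_th2)
+{
+	if (qam_tx < qam_th1)
+		return 0;	/* no packet extension needed */
+	if (qam_tx < qam_th2)
+		return 8;	/* 8 us packet extension */
+	return 16;		/* 16 us packet extension */
+}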
+
+/**
+ * struct iwl_he_pkt_ext_v2 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/Bw define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ *	For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x
+ * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v2 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ * control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ * color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ * of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ * and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ * parameter set, i.e. the backoff counters for trig-based ACs
+ * @STA_CTXT_HE_NIC_NOT_ACK_ENABLED: mark that the NIC doesn't support receiving
+ * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
+ * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
+ * len delim to determine if AGG or single.
+ * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
+ * not allowed (as there are OBSS that might classify such transmissions as
+ * radar pulses).
+ * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
+ * of threshold
+ * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
+ * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
+ *	extended to 20us for BW > 160 MHz or for MCS w/ 4096-QAM.
+ */
+enum iwl_he_sta_ctxt_flags {
+ STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
+ STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
+ STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
+ STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
+ STA_CTXT_HE_PACKET_EXT = BIT(8),
+ STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
+ STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
+ STA_CTXT_HE_ACK_ENABLED = BIT(11),
+ STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+ STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
+ STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
+ STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
+ STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
+ STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ * support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+ IWL_HE_HTC_SUPPORT = BIT(0),
+ IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
+ IWL_HE_HTC_BSR_SUPP = BIT(4),
+ IWL_HE_HTC_OMI_SUPP = BIT(5),
+ IWL_HE_HTC_BQR_SUPP = BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ * response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS (1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
+
+/**
+ * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd_v1 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v1 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
+
+/**
+ * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v2 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v1 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
+
+/**
+ * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @puncture_mask: puncture mask for EHT
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v3 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 puncture_mask;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
+
+/**
+ * struct iwl_he_monitor_cmd - configure air sniffer for HE
+ * @bssid: the BSSID to sniff for
+ * @reserved1: reserved for dword alignment
+ * @aid: the AID to track on for HE MU
+ * @reserved2: reserved for future use
+ */
+struct iwl_he_monitor_cmd {
+ u8 bssid[6];
+ __le16 reserved1;
+ __le16 aid;
+ u8 reserved2[6];
+} __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
new file mode 100644
index 000000000..91bfde6d5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_nvm_reg_h__
+#define __iwl_fw_api_nvm_reg_h__
+
+/**
+ * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
+ */
+enum iwl_regulatory_and_nvm_subcmd_ids {
+ /**
+ * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
+ */
+ NVM_ACCESS_COMPLETE = 0x0,
+
+ /**
+ * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd
+ */
+ LARI_CONFIG_CHANGE = 0x1,
+
+ /**
+ * @NVM_GET_INFO:
+ * Command is &struct iwl_nvm_get_info,
+ * response is &struct iwl_nvm_get_info_rsp
+ */
+ NVM_GET_INFO = 0x2,
+
+ /**
+ * @TAS_CONFIG: &struct iwl_tas_config_cmd
+ */
+ TAS_CONFIG = 0x3,
+
+ /**
+	 * @SAR_OFFSET_MAPPING_TABLE_CMD: &struct iwl_sar_offset_mapping_cmd
+ */
+ SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
+
+ /**
+ * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
+ */
+ PNVM_INIT_COMPLETE_NTFY = 0xFE,
+};
+
+/**
+ * enum iwl_nvm_access_op - NVM access opcode
+ * @IWL_NVM_READ: read NVM
+ * @IWL_NVM_WRITE: write NVM
+ */
+enum iwl_nvm_access_op {
+ IWL_NVM_READ = 0,
+ IWL_NVM_WRITE = 1,
+};
+
+/**
+ * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
+ * @NVM_ACCESS_TARGET_CACHE: access the cache
+ * @NVM_ACCESS_TARGET_OTP: access the OTP
+ * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
+ */
+enum iwl_nvm_access_target {
+ NVM_ACCESS_TARGET_CACHE = 0,
+ NVM_ACCESS_TARGET_OTP = 1,
+ NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
+ * @NVM_SECTION_TYPE_SW: software section
+ * @NVM_SECTION_TYPE_REGULATORY: regulatory section
+ * @NVM_SECTION_TYPE_CALIBRATION: calibration section
+ * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
+ * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
+ * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
+ * @NVM_MAX_NUM_SECTIONS: number of sections
+ */
+enum iwl_nvm_section_type {
+ NVM_SECTION_TYPE_SW = 1,
+ NVM_SECTION_TYPE_REGULATORY = 3,
+ NVM_SECTION_TYPE_CALIBRATION = 4,
+ NVM_SECTION_TYPE_PRODUCTION = 5,
+ NVM_SECTION_TYPE_REGULATORY_SDP = 8,
+ NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+ NVM_SECTION_TYPE_PHY_SKU = 12,
+ NVM_MAX_NUM_SECTIONS = 13,
+};
+
+/**
+ * struct iwl_nvm_access_cmd - Request the device to send an NVM section
+ * @op_code: &enum iwl_nvm_access_op
+ * @target: &enum iwl_nvm_access_target
+ * @type: &enum iwl_nvm_section_type
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write. On read it is empty
+ */
+struct iwl_nvm_access_cmd {
+ u8 op_code;
+ u8 target;
+ __le16 type;
+ __le16 offset;
+ __le16 length;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
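+
+/*
+ * Illustrative sketch, not part of the firmware API: a hypothetical
+ * helper preparing a read request for a given section; only the fixed
+ * part of the command is filled, and for reads the trailing data[]
+ * stays empty.
+ */
+static inline void iwl_nvm_access_fill_read(struct iwl_nvm_access_cmd *cmd,
+					    u16 section, u16 offset, u16 length)
+{
+	cmd->op_code = IWL_NVM_READ;
+	cmd->target = NVM_ACCESS_TARGET_CACHE;
+	cmd->type = cpu_to_le16(section);
+	cmd->offset = cpu_to_le16(offset);
+	cmd->length = cpu_to_le16(length);
+}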
+
+/**
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwl_nvm_access_resp {
+ __le16 offset;
+ __le16 length;
+ __le16 type;
+ __le16 status;
+ u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/*
+ * struct iwl_nvm_get_info - request to get NVM data
+ */
+struct iwl_nvm_get_info {
+ __le32 reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
+
+/**
+ * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
+ * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
+ */
+enum iwl_nvm_info_general_flags {
+ NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0),
+};
+
+/**
+ * struct iwl_nvm_get_info_general - general NVM data
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+ * @n_hw_addrs: number of reserved MAC addresses
+ */
+struct iwl_nvm_get_info_general {
+ __le32 flags;
+ __le16 nvm_version;
+ u8 board_type;
+ u8 n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
+
+/**
+ * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
+ * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
+ * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
+ * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
+ * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
+ * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
+ * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
+ * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
+ */
+enum iwl_nvm_mac_sku_flags {
+ NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0),
+ NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
+ NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
+ NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ /**
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ */
+ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
+ NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
+ NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
+ NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14),
+ NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15),
+};
+
+/**
+ * struct iwl_nvm_get_info_sku - mac information
+ * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
+ */
+struct iwl_nvm_get_info_sku {
+ __le32 mac_sku_flags;
+} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
+
+/**
+ * struct iwl_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwl_nvm_get_info_phy {
+ __le32 tx_chains;
+ __le32 rx_chains;
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWL_NUM_CHANNELS_V1 51
+#define IWL_NUM_CHANNELS 110
+
+/**
+ * struct iwl_nvm_get_info_regulatory_v1 - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_regulatory_v1 {
+ __le32 lar_enabled;
+ __le16 channel_profile[IWL_NUM_CHANNELS_V1];
+ __le16 reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @n_channels: number of valid channels in the array
+ * @channel_profile: regulatory data of this channel
+ */
+struct iwl_nvm_get_info_regulatory {
+ __le32 lar_enabled;
+ __le32 n_channels;
+ __le32 channel_profile[IWL_NUM_CHANNELS];
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
+
+/**
+ * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp_v3 {
+ struct iwl_nvm_get_info_general general;
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory_v1 regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp {
+ struct iwl_nvm_get_info_general general;
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
+
+/**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ * @reserved: reserved
+ */
+struct iwl_nvm_access_complete_cmd {
+ __le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwl_mcc_update_cmd {
+ __le16 mcc;
+ u8 source_id;
+ u8 reserved;
+ __le32 key;
+ u8 reserved2[20];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
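+
+/*
+ * Illustrative sketch, not part of the firmware API: the two-letter
+ * country code (e.g. "US") is packed into the 16-bit @mcc field;
+ * assuming the first letter occupies the upper byte, a hypothetical
+ * conversion could look like this.
+ */
+static inline __le16 iwl_mcc_pack(const char *alpha2)
+{
+	return cpu_to_le16((alpha2[0] << 8) | alpha2[1]);
+}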
+
+/**
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ * for the 5 GHz band.
+ */
+enum iwl_geo_information {
+ GEO_NO_INFO = 0,
+ GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ * see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels_data.
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16 bits are used.
+ */
+struct iwl_mcc_update_resp_v3 {
+ __le32 status;
+ __le16 mcc;
+ u8 cap;
+ u8 source_id;
+ __le16 time;
+ __le16 geo_info;
+ __le32 n_channels;
+ __le32 channels[];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+
+/**
+ * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ * see &enum iwl_geo_information.
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @reserved: for four bytes alignment.
+ * @n_channels: number of channels in @channels_data.
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16 bits are used.
+ */
+struct iwl_mcc_update_resp {
+ __le32 status;
+ __le16 mcc;
+ __le16 cap;
+ __le16 time;
+ __le16 geo_info;
+ u8 source_id;
+ u8 reserved[3];
+ __le32 n_channels;
+ __le32 channels[];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores, gets updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwl_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwl_mcc_chub_notif {
+ __le16 mcc;
+ u8 source_id;
+ u8 reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwl_mcc_update_status {
+ MCC_RESP_NEW_CHAN_PROFILE,
+ MCC_RESP_SAME_CHAN_PROFILE,
+ MCC_RESP_INVALID,
+ MCC_RESP_NVM_DISABLED,
+ MCC_RESP_ILLEGAL,
+ MCC_RESP_LOW_PRIORITY,
+ MCC_RESP_TEST_MODE_ACTIVE,
+ MCC_RESP_TEST_MODE_NOT_ACTIVE,
+ MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+};
+
+enum iwl_mcc_source {
+ MCC_SOURCE_OLD_FW = 0,
+ MCC_SOURCE_ME = 1,
+ MCC_SOURCE_BIOS = 2,
+ MCC_SOURCE_3G_LTE_HOST = 3,
+ MCC_SOURCE_3G_LTE_DEVICE = 4,
+ MCC_SOURCE_WIFI = 5,
+ MCC_SOURCE_RESERVED = 6,
+ MCC_SOURCE_DEFAULT = 7,
+ MCC_SOURCE_UNINITIALIZED = 8,
+ MCC_SOURCE_MCC_API = 9,
+ MCC_SOURCE_GET_CURRENT = 0x10,
+ MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
+};
+
+#define IWL_TAS_BLOCK_LIST_MAX 16
+/**
+ * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ */
+struct iwl_tas_config_cmd_v2 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tas_config_cmd_v3 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override default value of IEC regulatory
+ * @enable_tas_iec: in case override_tas_iec is set -
+ * indicates whether IEC regulatory is enabled or disabled
+ */
+struct iwl_tas_config_cmd_v3 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le16 override_tas_iec;
+ __le16 enable_tas_iec;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
+
+/**
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override default value of IEC regulatory
+ * @enable_tas_iec: in case override_tas_iec is set -
+ * indicates whether IEC regulatory is enabled or disabled
+ * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
+ * @reserved: reserved
+*/
+struct iwl_tas_config_cmd_v4 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+ u8 reserved;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
+
+union iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_v2 v2;
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+};
+/**
+ * enum iwl_lari_config_masks - bit masks for the various LARI config operations
+ * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in Ukraine
+ * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
+ * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled
+ * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in
+ * Indonesia
+ */
+enum iwl_lari_config_masks {
+ LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0),
+ LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1),
+ LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2),
+ LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
+};
+
+#define IWL_11AX_UKRAINE_MASK 3
+#define IWL_11AX_UKRAINE_SHIFT 8
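+
+/*
+ * Illustrative sketch, not part of the firmware API: the mask/shift pair
+ * above describes a two-bit field, so a hypothetical helper building
+ * those bits for one of the LARI bitmaps could be:
+ */
+static inline u32 iwl_lari_ukraine_11ax_bits(u32 value)
+{
+	return (value & IWL_11AX_UKRAINE_MASK) << IWL_11AX_UKRAINE_SHIFT;
+}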
+
+/**
+ * struct iwl_lari_config_change_cmd_v1 - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ * different predefined FW config operation
+ */
+struct iwl_lari_config_change_cmd_v1 {
+ __le32 config_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v2 - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ * different predefined FW config operation
+ * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets
+ */
+struct iwl_lari_config_change_cmd_v2 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v3 - change LARI configuration
+ * @config_bitmap: bit map of the config commands. each bit will trigger a
+ * different predefined FW config operation
+ * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets
+ * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs.
+ * For each supported country, a pair of regulatory override bit and 11ax mode exist
+ * in the bit field.
+ */
+struct iwl_lari_config_change_cmd_v3 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v4 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ */
+struct iwl_lari_config_change_cmd_v4 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v5 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ */
+struct iwl_lari_config_change_cmd_v5 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v6 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ *	Each bit represents a set of channels in a specific band that should be disabled.
+ */
+struct iwl_lari_config_change_cmd_v6 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
+
+/**
+ * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
+ * @status: PNVM image loading status
+ */
+struct iwl_pnvm_init_complete_ntfy {
+ __le32 status;
+} __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */
+
+#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
new file mode 100644
index 000000000..a0123f81f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2021-2022 Intel Corporation
+ */
+#ifndef __iwl_fw_api_offload_h__
+#define __iwl_fw_api_offload_h__
+
+/**
+ * enum iwl_prot_offload_subcmd_ids - protocol offload commands
+ */
+enum iwl_prot_offload_subcmd_ids {
+ /**
+ * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif
+ */
+ WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC,
+
+ /**
+ * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif
+ */
+ WOWLAN_INFO_NOTIFICATION = 0xFD,
+
+ /**
+ * @D3_END_NOTIFICATION: End D3 state notification
+ */
+ D3_END_NOTIFICATION = 0xFE,
+
+ /**
+ * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
+ */
+ STORED_BEACON_NTF = 0xFF,
+};
+
+#define MAX_STORED_BEACON_SIZE 600
+
+/**
+ * struct iwl_stored_beacon_notif_common - Stored beacon notif common fields
+ *
+ * @system_time: system time on air rise
+ * @tsf: TSF on air rise
+ * @beacon_timestamp: beacon on air rise
+ * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
+ * @channel: channel this beacon was received on
+ * @rates: rate in ucode internal format
+ * @byte_count: frame's byte count
+ */
+struct iwl_stored_beacon_notif_common {
+ __le32 system_time;
+ __le64 tsf;
+ __le32 beacon_timestamp;
+ __le16 band;
+ __le16 channel;
+ __le32 rates;
+ __le32 byte_count;
+} __packed;
+
+/**
+ * struct iwl_stored_beacon_notif_v2 - Stored beacon notification
+ *
+ * @common: fields common for all versions
+ * @data: beacon data, length in @byte_count
+ */
+struct iwl_stored_beacon_notif_v2 {
+ struct iwl_stored_beacon_notif_common common;
+ u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
+
+/**
+ * struct iwl_stored_beacon_notif_v3 - Stored beacon notification
+ *
+ * @common: fields common for all versions
+ * @sta_id: station for which the beacon was received
+ * @reserved: reserved for alignment
+ * @data: beacon data, length in @byte_count
+ */
+struct iwl_stored_beacon_notif_v3 {
+ struct iwl_stored_beacon_notif_common common;
+ u8 sta_id;
+ u8 reserved[3];
+ u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */
+
+#endif /* __iwl_fw_api_offload_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
new file mode 100644
index 000000000..db59a1897
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_paging_h__
+#define __iwl_fw_api_paging_h__
+
+#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
+
+/**
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd {
+ __le32 flags;
+ __le32 block_size;
+ __le32 block_num;
+ __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_paging_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
new file mode 100644
index 000000000..e66f77924
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_phy_ctxt_h__
+#define __iwl_fw_api_phy_ctxt_h__
+
+/* Supported bands */
+#define PHY_BAND_5 (0)
+#define PHY_BAND_24 (1)
+#define PHY_BAND_6 (2)
+
+/* Supported channel width, vary if there is VHT support */
+#define PHY_VHT_CHANNEL_MODE20 (0x0)
+#define PHY_VHT_CHANNEL_MODE40 (0x1)
+#define PHY_VHT_CHANNEL_MODE80 (0x2)
+#define PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
+ * bits 1:0 mark the distance from the center freq; for 20 MHz the offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
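+
+/*
+ * Illustrative sketch, not part of the firmware API: per the diagram
+ * above, bit 2 of the VHT control position selects above/below the
+ * center frequency and bits 1:0 encode the distance in 20 MHz steps
+ * (0 = closest to the center). A hypothetical encoder could be:
+ */
+static inline u8 iwl_vht_ctrl_pos(bool above, u8 steps_from_center)
+{
+	/* steps_from_center: 0..3, counted in 20 MHz slots from the center */
+	return (above ? 0x4 : 0x0) | (steps_from_center & 0x3);
+}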
+
+/*
+ * struct iwl_fw_channel_info_v1 - channel information
+ *
+ * @band: PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwl_fw_channel_info_v1 {
+ u8 band;
+ u8 channel;
+ u8 width;
+ u8 ctrl_pos;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwl_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwl_fw_channel_info {
+ __le32 channel;
+ u8 band;
+ u8 width;
+ u8 ctrl_pos;
+ u8 reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
+
+#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define PHY_RX_CHAIN_VALID_POS (1)
+#define PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << PHY_RX_CHAIN_VALID_POS)
+#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define PHY_RX_CHAIN_CNT_POS (10)
+#define PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
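+
+/*
+ * Illustrative sketch, not part of the firmware API: rxchain_info is a
+ * bit field built from the masks above; a hypothetical minimal encoder
+ * for the valid-antenna mask and the SISO/MIMO chain counts could be:
+ */
+static inline __le32 iwl_phy_rxchain_info(u8 valid_ant, u8 cnt, u8 mimo_cnt)
+{
+	u32 val = (valid_ant << PHY_RX_CHAIN_VALID_POS) |
+		  (cnt << PHY_RX_CHAIN_CNT_POS) |
+		  (mimo_cnt << PHY_RX_CHAIN_MIMO_CNT_POS);
+
+	return cpu_to_le32(val);
+}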
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define NUM_PHY_CTX 3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with
+ * various channel structures.
+ *
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd_tail {
+ __le32 txchain_info;
+ __le32 rxchain_info;
+ __le32 acquisition_data;
+ __le32 dsp_cfg_flags;
+} __packed;
+
+/**
+ * struct iwl_phy_context_cmd_v1 - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * Any other value means apply the new params after that many usecs.
+ * @tx_param_color: ???
+ * @ci: channel info
+ * @tail: command tail
+ */
+struct iwl_phy_context_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ __le32 apply_time;
+ __le32 tx_param_color;
+ struct iwl_fw_channel_info ci;
+ struct iwl_phy_context_cmd_tail tail;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @lmac_id: the lmac id the phy context belongs to
+ * @ci: channel info
+ * @rxchain_info: ???
+ * @dsp_cfg_flags: set to 0
+ * @reserved: reserved to align to 64 bit
+ */
+struct iwl_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
+ struct iwl_fw_channel_info ci;
+ __le32 lmac_id;
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ __le32 dsp_cfg_flags;
+ __le32 reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
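+
+/*
+ * Illustrative sketch of building the current PHY context command. The
+ * action value is one of FW_CTXT_ACTION_* from the context API header and
+ * is taken as a parameter; the helper name and the lmac id of 0 are
+ * arbitrary examples.
+ */
+static inline void iwl_phy_ctxt_cmd_example(struct iwl_phy_context_cmd *cmd,
+					    u32 id_and_color, u32 action,
+					    const struct iwl_fw_channel_info *ci)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->id_and_color = cpu_to_le32(id_and_color);
+	cmd->action = cpu_to_le32(action);	/* one of FW_CTXT_ACTION_* */
+	cmd->ci = *ci;
+	cmd->lmac_id = cpu_to_le32(0);		/* example: first LMAC */
+}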
+
+#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
new file mode 100644
index 000000000..b1b9c2985
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_phy_h__
+#define __iwl_fw_api_phy_h__
+
+/**
+ * enum iwl_phy_ops_subcmd_ids - PHY group commands
+ */
+enum iwl_phy_ops_subcmd_ids {
+ /**
+ * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE:
+ * Uses either &struct iwl_dts_measurement_cmd or
+ * &struct iwl_ext_dts_measurement_cmd
+ */
+ CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+
+ /**
+ * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
+ */
+ CTDP_CONFIG_CMD = 0x03,
+
+ /**
+ * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
+ */
+ TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+
+ /**
+ * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd
+ */
+ PER_CHAIN_LIMIT_OFFSET_CMD = 0x05,
+
+ /**
+ * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd
+ */
+ PER_PLATFORM_ANT_GAIN_CMD = 0x07,
+
+ /**
+ * @CT_KILL_NOTIFICATION: &struct ct_kill_notif
+ */
+ CT_KILL_NOTIFICATION = 0xFE,
+
+ /**
+ * @DTS_MEASUREMENT_NOTIF_WIDE:
+ * &struct iwl_dts_measurement_notif_v1 or
+ * &struct iwl_dts_measurement_notif_v2
+ */
+ DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+};
+
+/* DTS measurements */
+
+enum iwl_dts_measurement_flags {
+ DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
+ DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
+};
+
+/**
+ * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements
+ *
+ * @flags: indicates which measurements we want as specified in
+ * &enum iwl_dts_measurement_flags
+ */
+struct iwl_dts_measurement_cmd {
+ __le32 flags;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
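+
+/*
+ * Illustrative sketch: requesting a temperature-only measurement with the
+ * simple trigger command; requesting voltage as well would OR in
+ * DTS_TRIGGER_CMD_FLAGS_VOLT. The helper name is an arbitrary example.
+ */
+static inline void iwl_dts_trigger_temp_example(struct iwl_dts_measurement_cmd *cmd)
+{
+	cmd->flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP);
+}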
+
+/**
+* enum iwl_dts_control_measurement_mode - DTS measurement type
+* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide the latest
+* temperature read-back without waiting for a new measurement. Use automatic
+* SW DTS configuration.
+* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
+* trigger DTS reading and provide read back temperature read
+* when available.
+* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
+* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
+* without measurement trigger.
+*/
+enum iwl_dts_control_measurement_mode {
+ DTS_AUTOMATIC = 0,
+ DTS_REQUEST_READ = 1,
+ DTS_OVER_WRITE = 2,
+ DTS_DIRECT_WITHOUT_MEASURE = 3,
+};
+
+/**
+* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
+* @DTS_USE_TOP: Top
+* @DTS_USE_CHAIN_A: chain A
+* @DTS_USE_CHAIN_B: chain B
+* @DTS_USE_CHAIN_C: chain C
+* @XTAL_TEMPERATURE: read temperature from xtal
+*/
+enum iwl_dts_used {
+ DTS_USE_TOP = 0,
+ DTS_USE_CHAIN_A = 1,
+ DTS_USE_CHAIN_B = 2,
+ DTS_USE_CHAIN_C = 3,
+ XTAL_TEMPERATURE = 4,
+};
+
+/**
+* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
+* @DTS_BIT6_MODE: bit 6 mode
+* @DTS_BIT8_MODE: bit 8 mode
+*/
+enum iwl_dts_bit_mode {
+ DTS_BIT6_MODE = 0,
+ DTS_BIT8_MODE = 1,
+};
+
+/**
+ * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements
+ * @control_mode: see &enum iwl_dts_control_measurement_mode
+ * @temperature: used when over write DTS mode is selected
+ * @sensor: set temperature sensor to use. See &enum iwl_dts_used
+ * @avg_factor: average factor to DTS in request DTS read mode
+ * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
+ * @step_duration: step duration for the DTS
+ */
+struct iwl_ext_dts_measurement_cmd {
+ __le32 control_mode;
+ __le32 temperature;
+ __le32 sensor;
+ __le32 avg_factor;
+ __le32 bit_mode;
+ __le32 step_duration;
+} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
+
+/**
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ */
+struct iwl_dts_measurement_notif_v1 {
+ __le32 temp;
+ __le32 voltage;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+ __le32 temp;
+ __le32 voltage;
+ __le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct iwl_dts_measurement_resp - measurements response
+ *
+ * @temp: the measured temperature
+ */
+struct iwl_dts_measurement_resp {
+ __le32 temp;
+} __packed; /* CMD_DTS_MEASUREMENT_RSP_API_S_VER_1 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ * This structure represents both versions of this notification.
+ *
+ * @temperature: the current temperature in Celsius
+ * @dts: only in v2: bitmap of the DTSs that triggered the CT kill:
+ * bit 0: ToP master
+ * bit 1: PA chain A master
+ * bit 2: PA chain B master
+ * bit 3: ToP slave
+ * bit 4: PA chain A slave
+ * bit 5: PA chain B slave
+ * bits 6,7: reserved (set to 0)
+ * @scheme: only for v2: scheme that triggered the CT kill (0-SW, 1-HW)
+ */
+struct ct_kill_notif {
+ __le16 temperature;
+ u8 dts;
+ u8 scheme;
+} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
+
+/**
+* enum iwl_mvm_ctdp_cmd_operation - cTDP command operations
+* @CTDP_CMD_OPERATION_START: update the current budget
+* @CTDP_CMD_OPERATION_STOP: stop cTDP
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
+*/
+enum iwl_mvm_ctdp_cmd_operation {
+ CTDP_CMD_OPERATION_START = 0x1,
+ CTDP_CMD_OPERATION_STOP = 0x2,
+ CTDP_CMD_OPERATION_REPORT = 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatt
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+ __le32 operation;
+ __le32 budget;
+ __le32 window_size;
+} __packed;
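+
+/*
+ * Illustrative sketch: starting cTDP with a given power budget. The helper
+ * name is an arbitrary example; window_size is defined in the API but not
+ * used, so it is left at zero.
+ */
+static inline void iwl_ctdp_start_example(struct iwl_mvm_ctdp_cmd *cmd,
+					  u32 budget_mw)
+{
+	cmd->operation = cpu_to_le32(CTDP_CMD_OPERATION_START);
+	cmd->budget = cpu_to_le32(budget_mw);	/* milliwatt */
+	cmd->window_size = 0;
+}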
+
+#define IWL_MAX_DTS_TRIPS 8
+
+/**
+ * struct temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+ __le32 num_temps;
+ __le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
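+
+/*
+ * Illustrative sketch: configuring two reporting thresholds. At most
+ * IWL_MAX_DTS_TRIPS entries fit; the temperatures are arbitrary example
+ * values, assumed here to be in the same Celsius units as the CT-kill
+ * notification above and listed in ascending order.
+ */
+static inline void iwl_temp_ths_example(struct temp_report_ths_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->num_temps = cpu_to_le32(2);
+	cmd->thresholds[0] = cpu_to_le16(75);	/* placeholder */
+	cmd->thresholds[1] = cpu_to_le16(95);	/* placeholder */
+}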
+
+#endif /* __iwl_fw_api_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
new file mode 100644
index 000000000..f92cac1da
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_power_h__
+#define __iwl_fw_api_power_h__
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ * memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ * reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ * D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short
+ * idle timeout
+ */
+enum iwl_ltr_config_flags {
+ LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+ LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+ LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+ LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+ LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+ LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+ LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+ LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
+};
+
+/**
+ * struct iwl_ltr_config_cmd_v1 - configures the LTR
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
+ */
+struct iwl_ltr_config_cmd_v1 {
+ __le32 flags;
+ __le32 static_long;
+ __le32 static_short;
+} __packed; /* LTR_CAPABLE_API_S_VER_1 */
+
+#define LTR_VALID_STATES_NUM 4
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
+ * @ltr_cfg_values: LTR parameters table values (in usec) in following order:
+ * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
+ * is set.
+ * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
+ * %LTR_CFG_FLAG_UPDATE_VALUES is set.
+ */
+struct iwl_ltr_config_cmd {
+ __le32 flags;
+ __le32 static_long;
+ __le32 static_short;
+ __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
+ __le32 ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
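+
+/*
+ * Illustrative sketch: enabling LTR and overriding the per-state values.
+ * The helper name, register values and timeouts are placeholders, not
+ * recommended settings.
+ */
+static inline void iwl_ltr_cfg_example(struct iwl_ltr_config_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE |
+				 LTR_CFG_FLAG_UPDATE_VALUES);
+	cmd->static_long = cpu_to_le32(0x1003);		/* placeholder */
+	cmd->static_short = cpu_to_le32(0x1003);	/* placeholder */
+	/* TX, RX, Short Idle, Long Idle - all in usec, placeholders */
+	cmd->ltr_cfg_values[0] = cpu_to_le32(300);
+	cmd->ltr_cfg_values[1] = cpu_to_le32(300);
+	cmd->ltr_cfg_values[2] = cpu_to_le32(300);
+	cmd->ltr_cfg_values[3] = cpu_to_le32(300);
+	cmd->ltr_short_idle_timeout = cpu_to_le32(100);
+}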
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define POWER_LPRX_RSSI_THRESHOLD 75
+#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM may sleep over DTIM until the listen interval.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
+*/
+enum iwl_power_flags {
+ POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
+ POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
+ POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
+ POWER_FLAGS_BT_SCO_ENA = BIT(8),
+ POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
+ POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
+};
+
+#define IWL_POWER_VEC_SIZE 5
+
+/**
+ * struct iwl_powertable_cmd - legacy power command. Beside old API support this
+ * is used also with a new power API for device wide power settings.
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value even when PM is disabled.
+ * @debug_flags: debug flags
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ */
+struct iwl_powertable_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ u8 keep_alive_seconds;
+ u8 debug_flags;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 skip_dtim_periods;
+ __le32 lprx_rssi_threshold;
+} __packed;
+
+/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ * '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK:
+ * Device Retention indication, '1' indicates retention is enabled.
+ * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK:
+ * 32 kHz external slow clock valid indication, '1' indicates the clock is
+ * valid.
+*/
+enum iwl_device_power_flags {
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1),
+ DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from &enum iwl_device_power_flags
+ * @reserved: reserved (padding)
+ */
+struct iwl_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ __le16 flags;
+ __le16 reserved;
+} __packed;
+
+/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier, &enum iwl_ctxt_id_and_color
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM. Keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value even when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from the last
+ * received unicast RX packet before the client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID the client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold: (unused)
+ * @reserved: reserved (padding)
+ */
+struct iwl_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ __le32 id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ __le16 flags;
+ __le16 keep_alive_seconds;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 rx_data_timeout_uapsd;
+ __le32 tx_data_timeout_uapsd;
+ u8 lprx_rssi_threshold;
+ u8 skip_dtim_periods;
+ __le16 snooze_interval;
+ __le16 snooze_window;
+ u8 snooze_step;
+ u8 qndp_tid;
+ u8 uapsd_ac_flags;
+ u8 uapsd_max_sp;
+ u8 heavy_tx_thld_packets;
+ u8 heavy_rx_thld_packets;
+ u8 heavy_tx_thld_percentage;
+ u8 heavy_rx_thld_percentage;
+ u8 limited_ps_threshold;
+ u8 reserved;
+} __packed;
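+
+/*
+ * Illustrative sketch: a minimal MAC power configuration that enables power
+ * save with DTIM skipping. The id/color encoding comes from the MAC context
+ * API and is taken as a parameter; the helper name and the timeout values
+ * are arbitrary examples.
+ */
+static inline void iwl_mac_power_example(struct iwl_mac_power_cmd *cmd,
+					 u32 id_and_color)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->id_and_color = cpu_to_le32(id_and_color);
+	cmd->flags = cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK |
+				 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK |
+				 POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+	cmd->keep_alive_seconds = cpu_to_le16(25);	/* default from the doc */
+	cmd->rx_data_timeout = cpu_to_le32(100 * 1000);	/* 100 ms in usec */
+	cmd->tx_data_timeout = cpu_to_le32(100 * 1000);	/* 100 ms in usec */
+	cmd->skip_dtim_periods = 2;			/* skip one DTIM */
+}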
+
+/**
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
+ * the associated AP is identified as improperly implementing the uAPSD
+ * protocol. PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ * @mac_id: MAC context id
+ * @reserved: reserved for alignment
+ */
+struct iwl_uapsd_misbehaving_ap_notif {
+ __le32 sta_id;
+ u8 mac_id;
+ u8 reserved[3];
+} __packed;
+
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBm.
+ */
+struct iwl_reduce_tx_power_cmd {
+ u8 flags;
+ u8 mac_context_id;
+ __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
+enum iwl_dev_tx_power_cmd_mode {
+ IWL_TX_POWER_MODE_SET_MAC = 0,
+ IWL_TX_POWER_MODE_SET_DEVICE = 1,
+ IWL_TX_POWER_MODE_SET_CHAINS = 2,
+ IWL_TX_POWER_MODE_SET_ACK = 3,
+ IWL_TX_POWER_MODE_SET_SAR_TIMER = 4,
+ IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */
+
+#define IWL_NUM_CHAIN_TABLES 1
+#define IWL_NUM_CHAIN_TABLES_V2 2
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS_V1 5
+#define IWL_NUM_SUB_BANDS_V2 11
+
+/**
+ * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in 1/8 dBm.
+ * @dev_24: device TX power restriction in 1/8 dBm
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_common {
+ __le32 set_mode;
+ __le32 mac_context_id;
+ __le16 pwr_restriction;
+ __le16 dev_24;
+ __le16 dev_52_low;
+ __le16 dev_52_high;
+};
+
+/**
+ * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3
+ * @per_chain: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd_v3 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
+#define IWL_DEV_MAX_TX_POWER 0x7FFF
+
+/**
+ * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @reserved: reserved (padding)
+ */
+struct iwl_dev_tx_power_cmd_v4 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ u8 enable_ack_reduction;
+ u8 reserved[3];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW reverts to the
+ * default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v5 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW reverts to the
+ * default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v6 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW reverts to the
+ * default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v7 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v3: version 3 part of the command
+ * @v4: version 4 part of the command
+ * @v5: version 5 part of the command
+ * @v6: version 6 part of the command
+ * @v7: version 7 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v3 v3;
+ struct iwl_dev_tx_power_cmd_v4 v4;
+ struct iwl_dev_tx_power_cmd_v5 v5;
+ struct iwl_dev_tx_power_cmd_v6 v6;
+ struct iwl_dev_tx_power_cmd_v7 v7;
+ };
+};
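+
+/*
+ * Illustrative sketch: filling the common part plus the v6 per-chain table
+ * with a single flat limit. The helper name and the use of the maximum
+ * per-MAC restriction are arbitrary examples; a real caller picks the
+ * version part that matches the firmware's command version.
+ */
+static inline void iwl_sar_cmd_v6_example(struct iwl_dev_tx_power_cmd *cmd,
+					  u16 limit_eighth_dbm)
+{
+	int t, c, b;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
+	cmd->common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+
+	for (t = 0; t < IWL_NUM_CHAIN_TABLES_V2; t++)
+		for (c = 0; c < IWL_NUM_CHAIN_LIMITS; c++)
+			for (b = 0; b < IWL_NUM_SUB_BANDS_V2; b++)
+				cmd->v6.per_chain[t][c][b] =
+					cpu_to_le16(limit_eighth_dbm);
+}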
+
+#define IWL_NUM_GEO_PROFILES 3
+#define IWL_NUM_GEO_PROFILES_V3 8
+#define IWL_NUM_BANDS_PER_CHAIN_V1 2
+#define IWL_NUM_BANDS_PER_CHAIN_V2 3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+ IWL_PER_CHAIN_OFFSET_SET_TABLES,
+ IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+}; /* PER_CHAIN_OFFSET_OPERATION_E */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for PER_CHAIN_LIMIT_OFFSET_CMD.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+ __le16 max_tx_power;
+ u8 chain_a;
+ u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v1 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
+ */
+struct iwl_geo_tx_power_profiles_cmd_v2 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
+ */
+struct iwl_geo_tx_power_profiles_cmd_v3 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
+ */
+struct iwl_geo_tx_power_profiles_cmd_v4 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
+ */
+struct iwl_geo_tx_power_profiles_cmd_v5 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */
+
+union iwl_geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd_v1 v1;
+ struct iwl_geo_tx_power_profiles_cmd_v2 v2;
+ struct iwl_geo_tx_power_profiles_cmd_v3 v3;
+ struct iwl_geo_tx_power_profiles_cmd_v4 v4;
+ struct iwl_geo_tx_power_profiles_cmd_v5 v5;
+};
+
+/**
+ * struct iwl_geo_tx_power_profiles_resp - response to PER_CHAIN_LIMIT_OFFSET_CMD cmd
+ * @profile_idx: current geo profile in use
+ */
+struct iwl_geo_tx_power_profiles_resp {
+ __le32 profile_idx;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
+
+/**
+ * union iwl_ppag_table_cmd - union for all versions of PPAG command
+ * @v1: version 1
+ * @v2: version 2
+ *
+ * @flags: bit 0 - indicates enablement of PPAG for ETSI
+ * bit 1 - indicates enablement of PPAG for CHINA BIOS
+ * bit 1 can be used only in v3 (identical to v2)
+ * @gain: table of antenna gain values per chain and sub-band
+ * @reserved: reserved
+ */
+union iwl_ppag_table_cmd {
+ struct {
+ __le32 flags;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ s8 reserved[2];
+ } v1;
+ struct {
+ __le32 flags;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ s8 reserved[2];
+ } v2;
+} __packed;
+
+#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
+#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
+
+/**
+ * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD
+ * @offset_map: maps an MCC to a geo SAR group
+ * @reserved: reserved
+ */
+struct iwl_sar_offset_mapping_cmd {
+ u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE]
+ [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE];
+ u16 reserved;
+} __packed; /* SAR_OFFSET_MAPPING_TABLE_CMD_API_S */
+
+/**
+ * struct iwl_beacon_filter_cmd
+ * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
+ * If the current temperature is above this threshold - Fast filter
+ * will be used, If the current temperature is below this threshold -
+ * Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether the temperature has changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether the temperature has changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_debug_flag: beacon filtering debug configuration
+ * @bf_escape_timer: Send beacons to the driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ * for a longer period of time than this escape timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ * @bf_threshold_absolute_low: See below.
+ * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated
+ * for this beacon crossed this absolute threshold. For the 'Increase'
+ * direction the bf_energy_absolute_low[i] is used. For the 'Decrease'
+ * direction the bf_energy_absolute_high[i] is used. Zero value means
+ * that this specific threshold is ignored for beacon filtering, and
+ * beacon will not be forced to be sent to driver due to this setting.
+ */
+struct iwl_beacon_filter_cmd {
+ __le32 bf_energy_delta;
+ __le32 bf_roaming_energy_delta;
+ __le32 bf_roaming_state;
+ __le32 bf_temp_threshold;
+ __le32 bf_temp_fast_filter;
+ __le32 bf_temp_slow_filter;
+ __le32 bf_enable_beacon_filter;
+ __le32 bf_debug_flag;
+ __le32 bf_escape_timer;
+ __le32 ba_escape_timer;
+ __le32 ba_enable_beacon_abort;
+ __le32 bf_threshold_absolute_low[2];
+ __le32 bf_threshold_absolute_high[2];
+} __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */
+
+/* Beacon filtering and beacon abort */
+#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
+#define IWL_BF_ENERGY_DELTA_MAX 255
+#define IWL_BF_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
+#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
+#define IWL_BF_ROAMING_STATE_MAX 255
+#define IWL_BF_ROAMING_STATE_MIN 0
+
+#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
+#define IWL_BF_TEMP_THRESHOLD_MAX 255
+#define IWL_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
+#define IWL_BF_TEMP_FAST_FILTER_MAX 255
+#define IWL_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
+#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
+
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
+#define IWL_BF_ESCAPE_TIMER_D0I3 0
+#define IWL_BF_ESCAPE_TIMER_MAX 1024
+#define IWL_BF_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
+#define IWL_BA_ESCAPE_TIMER_MAX 1024
+#define IWL_BA_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWL_BF_CMD_CONFIG(mode) \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
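+
+/*
+ * Illustrative sketch: IWL_BF_CMD_CONFIG_DEFAULTS expands to a set of
+ * designated initializers, so a default-configured beacon filter command
+ * can be built like this; enabling filtering and abort is shown explicitly.
+ * The helper name is an arbitrary example.
+ */
+static inline void iwl_bf_cmd_defaults_example(struct iwl_beacon_filter_cmd *cmd)
+{
+	struct iwl_beacon_filter_cmd defaults = {
+		IWL_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter =
+			cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+		.ba_enable_beacon_abort =
+			cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+	};
+
+	*cmd = defaults;
+}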
+#endif /* __iwl_fw_api_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
new file mode 100644
index 000000000..1a84a4081
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2020-2021 Intel Corporation
+ */
+#ifndef __iwl_fw_api_rfi_h__
+#define __iwl_fw_api_rfi_h__
+
+#define IWL_RFI_LUT_ENTRY_CHANNELS_NUM 15
+#define IWL_RFI_LUT_SIZE 24
+#define IWL_RFI_LUT_INSTALLED_SIZE 4
+
+/**
+ * struct iwl_rfi_lut_entry - an entry in the RFI frequency LUT.
+ *
+ * @freq: frequency
+ * @channels: channels that can suffer interference at frequency freq (at most 15)
+ * @bands: the corresponding bands
+ */
+struct iwl_rfi_lut_entry {
+ __le16 freq;
+ u8 channels[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
+ u8 bands[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
+} __packed;
+
+/**
+ * struct iwl_rfi_config_cmd - RFI configuration table
+ *
+ * @table: a table of up to 24 frequency/channel mappings
+ * @oem: specifies if this is the default table or set by OEM
+ */
+struct iwl_rfi_config_cmd {
+ struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE];
+ u8 oem;
+ u8 reserved[3];
+} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
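+
+/*
+ * Illustrative sketch: filling a single LUT entry of an OEM-provided RFI
+ * table. The helper name and the frequency/channel values are placeholders;
+ * the band codes are assumed to follow the PHY_BAND_* encoding used by the
+ * PHY context API.
+ */
+static inline void iwl_rfi_cmd_example(struct iwl_rfi_config_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->oem = 1;					/* table set by OEM */
+	cmd->table[0].freq = cpu_to_le16(2600);		/* placeholder frequency */
+	cmd->table[0].channels[0] = 36;			/* placeholder channel */
+	cmd->table[0].bands[0] = 0;			/* placeholder band code */
+}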
+
+/**
+ * enum iwl_rfi_freq_table_status - status of the frequency table query
+ * @RFI_FREQ_TABLE_OK: can be used
+ * @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later
+ * @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW
+ */
+enum iwl_rfi_freq_table_status {
+ RFI_FREQ_TABLE_OK,
+ RFI_FREQ_TABLE_DVFS_NOT_READY,
+ RFI_FREQ_TABLE_DISABLED,
+};
+
+/**
+ * struct iwl_rfi_freq_table_resp_cmd - get the rfi freq table used by FW
+ *
+ * @table: table used by FW
+ * @status: see &enum iwl_rfi_freq_table_status
+ */
+struct iwl_rfi_freq_table_resp_cmd {
+ struct iwl_rfi_lut_entry table[IWL_RFI_LUT_INSTALLED_SIZE];
+ __le32 status;
+} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rfi_deactivate_notif - notification that the FW disabled RFIm
+ *
+ * @reason: used only for a log message
+ */
+struct iwl_rfi_deactivate_notif {
+ __le32 reason;
+} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
+#endif /* __iwl_fw_api_rfi_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
new file mode 100644
index 000000000..687f804c4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_rs_h__
+#define __iwl_fw_api_rs_h__
+
+#include "mac.h"
+
+/**
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
+ * bandwidths <= 80MHz
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz
+ * bandwidth
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 1 spatial
+ * stream
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 2 spatial
+ * streams
+ */
+enum iwl_tlc_mng_cfg_flags {
+ IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
+ IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
+ IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2),
+ IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
+ IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw - channel width options
+ * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw {
+ IWL_TLC_MNG_CH_WIDTH_20MHZ,
+ IWL_TLC_MNG_CH_WIDTH_40MHZ,
+ IWL_TLC_MNG_CH_WIDTH_80MHZ,
+ IWL_TLC_MNG_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ */
+enum iwl_tlc_mng_cfg_chains {
+ IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+ IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode {
+ IWL_TLC_MNG_MODE_CCK = 0,
+ IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_HT,
+ IWL_TLC_MNG_MODE_VHT,
+ IWL_TLC_MNG_MODE_HE,
+ IWL_TLC_MNG_MODE_INVALID,
+ IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10
+ * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates {
+ IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+ IWL_TLC_MNG_HT_RATE_MCS1,
+ IWL_TLC_MNG_HT_RATE_MCS2,
+ IWL_TLC_MNG_HT_RATE_MCS3,
+ IWL_TLC_MNG_HT_RATE_MCS4,
+ IWL_TLC_MNG_HT_RATE_MCS5,
+ IWL_TLC_MNG_HT_RATE_MCS6,
+ IWL_TLC_MNG_HT_RATE_MCS7,
+ IWL_TLC_MNG_HT_RATE_MCS8,
+ IWL_TLC_MNG_HT_RATE_MCS9,
+ IWL_TLC_MNG_HT_RATE_MCS10,
+ IWL_TLC_MNG_HT_RATE_MCS11,
+ IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
+};
+
+enum IWL_TLC_MNG_NSS {
+ IWL_TLC_NSS_1,
+ IWL_TLC_NSS_2,
+ IWL_TLC_NSS_MAX
+};
+
+/**
+ * enum IWL_TLC_MCS_PER_BW - mcs index per BW
+ * @IWL_TLC_MCS_PER_BW_80: MCS for BW - 20MHz, 40MHz, 80MHz
+ * @IWL_TLC_MCS_PER_BW_160: MCS for BW - 160MHz
+ * @IWL_TLC_MCS_PER_BW_320: MCS for BW - 320MHz
+ * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
+ * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
+ */
+enum IWL_TLC_MCS_PER_BW {
+ IWL_TLC_MCS_PER_BW_80,
+ IWL_TLC_MCS_PER_BW_160,
+ IWL_TLC_MCS_PER_BW_320,
+ IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1,
+ IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1,
+};
+
+/**
+ * struct iwl_tlc_config_cmd_v3 - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @amsdu: TX A-MSDU is supported
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW
+ * <nss, channel-width> pair (0 - 80 MHz width and below, 1 - 160 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @reserved2: reserved
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd_v3 {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 amsdu;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3];
+ __le16 max_mpdu_len;
+ u8 sgi_ch_width_supp;
+ u8 reserved2;
+ __le32 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
+
+/**
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd_v4 {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
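+
+/*
+ * Illustrative sketch: a station TLC configuration for 2x2 VHT at up to
+ * 80 MHz. The helper name, MCS bitmaps and MPDU length are arbitrary
+ * example values.
+ */
+static inline void iwl_tlc_cfg_v4_example(struct iwl_tlc_config_cmd_v4 *cmd,
+					  u8 sta_id)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sta_id = sta_id;
+	cmd->max_ch_width = IWL_TLC_MNG_CH_WIDTH_80MHZ;
+	cmd->mode = IWL_TLC_MNG_MODE_VHT;
+	cmd->chains = IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK;
+	cmd->sgi_ch_width_supp = BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ) |
+				 BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ) |
+				 BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+	cmd->flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK);
+	/* MCS 0-9 for both streams on the 80 MHz-and-below table */
+	cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(0x3ff);
+	cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(0x3ff);
+	cmd->max_mpdu_len = cpu_to_le16(3895);	/* one of the standard VHT max MPDU sizes */
+}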
+
+/**
+ * enum iwl_tlc_update_flags - updated fields
+ * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
+ * @IWL_TLC_NOTIF_FLAG_AMSDU: AMSDU parameters update
+ */
+enum iwl_tlc_update_flags {
+ IWL_TLC_NOTIF_FLAG_RATE = BIT(0),
+ IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
+};
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @rate: current initial rate
+ * @amsdu_size: Max AMSDU size, in bytes
+ * @amsdu_enabled: bitmap for per-TID AMSDU enablement
+ */
+struct iwl_tlc_update_notif {
+ u8 sta_id;
+ u8 reserved[3];
+ __le32 flags;
+ __le32 rate;
+ __le32 amsdu_size;
+ __le32 amsdu_enabled;
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
+ */
+enum {
+ IWL_RATE_1M_INDEX = 0,
+ IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+ IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+ IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
+ IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX,
+ IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
+ IWL_RATE_18M_INDEX,
+ IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX,
+ IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
+ IWL_RATE_36M_INDEX,
+ IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX,
+ IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
+ IWL_RATE_54M_INDEX,
+ IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
+ IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+ IWL_RATE_60M_INDEX,
+ IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+ IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+ IWL_RATE_MCS_8_INDEX,
+ IWL_RATE_MCS_9_INDEX,
+ IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+ IWL_RATE_MCS_10_INDEX,
+ IWL_RATE_MCS_11_INDEX,
+ IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
+ IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+ IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWL_RATE_6M_PLCP = 13,
+ IWL_RATE_9M_PLCP = 15,
+ IWL_RATE_12M_PLCP = 5,
+ IWL_RATE_18M_PLCP = 7,
+ IWL_RATE_24M_PLCP = 9,
+ IWL_RATE_36M_PLCP = 11,
+ IWL_RATE_48M_PLCP = 1,
+ IWL_RATE_54M_PLCP = 3,
+ IWL_RATE_1M_PLCP = 10,
+ IWL_RATE_2M_PLCP = 20,
+ IWL_RATE_5M_PLCP = 55,
+ IWL_RATE_11M_PLCP = 110,
+ IWL_RATE_INVM_PLCP = -1,
+};
+
+/*
+ * rate_n_flags bit fields version 1
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK_V1 BIT(RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS_V1 9
+#define RATE_MCS_CCK_MSK_V1 BIT(RATE_MCS_CCK_POS_V1)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define RATE_MCS_VHT_POS_V1 26
+#define RATE_MCS_VHT_MSK_V1 BIT(RATE_MCS_VHT_POS_V1)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define RATE_HT_MCS_RATE_CODE_MSK_V1 0x7
+#define RATE_HT_MCS_NSS_POS_V1 3
+#define RATE_HT_MCS_NSS_MSK_V1 (3 << RATE_HT_MCS_NSS_POS_V1)
+#define RATE_HT_MCS_MIMO2_MSK BIT(RATE_HT_MCS_NSS_POS_V1)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_HT_MCS_GF_POS 10
+#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS)
+
+#define RATE_HT_MCS_INDEX_MSK_V1 0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define RATE_VHT_MCS_NSS_POS 4
+#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS)
+#define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define RATE_LEGACY_RATE_MSK_V1 0xff
+
+/* Bit 10 - OFDM HE */
+#define RATE_MCS_HE_POS_V1 10
+#define RATE_MCS_HE_MSK_V1 BIT(RATE_MCS_HE_POS_V1)
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define RATE_MCS_CHAN_WIDTH_POS 11
+#define RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS_V1 13
+#define RATE_MCS_SGI_MSK_V1 BIT(RATE_MCS_SGI_POS_V1)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define RATE_MCS_ANT_POS 14
+#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \
+ RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_MSK RATE_MCS_ANT_AB_MSK
+
+/* Bit 17: (0) SS, (1) SS*2 */
+#define RATE_MCS_STBC_POS 17
+#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define RATE_HE_DUAL_CARRIER_MODE 18
+#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define RATE_MCS_BF_POS 19
+#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
+
+/*
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
+ */
+#define RATE_MCS_HE_GI_LTF_POS 20
+#define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS_V1 22
+#define RATE_MCS_HE_TYPE_SU_V1 (0 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_EXT_SU_V1 BIT(RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_MU_V1 (2 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_TRIG_V1 (3 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_MSK_V1 (3 << RATE_MCS_HE_TYPE_POS_V1)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define RATE_MCS_DUP_POS_V1 24
+#define RATE_MCS_DUP_MSK_V1 (3 << RATE_MCS_DUP_POS_V1)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS_V1 27
+#define RATE_MCS_LDPC_MSK_V1 BIT(RATE_MCS_LDPC_POS_V1)
+
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS_V1 28
+#define RATE_MCS_HE_106T_MSK_V1 BIT(RATE_MCS_HE_106T_POS_V1)
+
+/* Bit 30-31: (1) RTS, (2) CTS */
+#define RATE_MCS_RTS_REQUIRED_POS (30)
+#define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS)
+
+#define RATE_MCS_CTS_REQUIRED_POS (31)
+#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
+
+/* rate_n_flags bit field version 2
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ */
+
+/* Bits 10-8: rate format
+ * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
+ * (3) Very High-throughput (VHT) (4) High-efficiency (HE)
+ * (5) Extremely High-throughput (EHT)
+ */
+#define RATE_MCS_MOD_TYPE_POS 8
+#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS)
+
+/*
+ * Legacy CCK rate format for bits 0:3:
+ *
+ * (0) 0xa - 1 Mbps
+ * (1) 0x14 - 2 Mbps
+ * (2) 0x37 - 5.5 Mbps
+ * (3) 0x6e - 11 Mbps
+ *
+ * Legacy OFDM rate format for bits 3:0:
+ *
+ * (0) 6 Mbps
+ * (1) 9 Mbps
+ * (2) 12 Mbps
+ * (3) 18 Mbps
+ * (4) 24 Mbps
+ * (5) 36 Mbps
+ * (6) 48 Mbps
+ * (7) 54 Mbps
+ *
+ */
+#define RATE_LEGACY_RATE_MSK 0x7
+
+/*
+ * HT, VHT, HE, EHT rate format for bits 3:0
+ * 3-0: MCS
+ *
+ */
+#define RATE_HT_MCS_CODE_MSK 0x7
+#define RATE_MCS_NSS_POS 4
+#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS)
+#define RATE_MCS_CODE_MSK 0xf
+#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \
+ ((r) & RATE_HT_MCS_CODE_MSK))
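+
+/*
+ * Illustrative sketch: decoding the rate index from a version-2 rate_n_flags
+ * word using the masks above. Legacy CCK/OFDM keep a rate index in the low
+ * bits instead of an MCS. The helper name is an arbitrary example.
+ */
+static inline u32 iwl_rate_v2_index_example(u32 rate_n_flags)
+{
+	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+	if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK)
+		return rate_n_flags & RATE_LEGACY_RATE_MSK;
+
+	if (format == RATE_MCS_HT_MSK)
+		return RATE_HT_MCS_INDEX(rate_n_flags);	/* folds NSS into the MCS index */
+
+	/* VHT/HE/EHT: MCS in bits 3:0, NSS - 1 in bit 4 */
+	return rate_n_flags & RATE_MCS_CODE_MSK;
+}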
+
+/* Bits 7-5: reserved */
+
+/*
+ * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz
+ */
+#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 15-14: Antenna selection:
+ * Bit 14: Ant A active
+ * Bit 15: Ant B active
+ *
+ * All relevant definitions are same as in v1
+ */
+
+/* Bit 16 (1) LDPC enables, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS 16
+#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+
+/* Bit 17: (0) SS, (1) SS*2 (same as v1) */
+
+/* Bit 18: OFDM-HE dual carrier mode (same as v1) */
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */
+
+/*
+ * Bit 22-20: HE LTF type and guard interval
+ * CCK:
+ * 0 long preamble
+ * 1 short preamble
+ * HT/VHT:
+ * 0 0.8us
+ * 1 0.4us
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * 4 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ */
+#define RATE_MCS_HE_GI_LTF_MSK (0x7 << RATE_MCS_HE_GI_LTF_POS)
+#define RATE_MCS_SGI_POS RATE_MCS_HE_GI_LTF_POS
+#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
+#define RATE_MCS_HE_SU_4_LTF 3
+#define RATE_MCS_HE_SU_4_LTF_08_GI 4
+
+/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS 23
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
+
+/* Bit 25: duplicate channel enabled
+ *
+ * if this bit is set, duplicate is according to BW (bits 11-13):
+ *
+ * CCK: 2x 20MHz
+ * OFDM Legacy: N x 20MHz (N = BW / 20MHz, either 2, 4, 8, 16)
+ * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160)
+ * */
+#define RATE_MCS_DUP_POS 25
+#define RATE_MCS_DUP_MSK (1 << RATE_MCS_DUP_POS)
+
+/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 26
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
+
+/* Bit 27: EHT extra LTF:
+ * instead of 1 LTF for SISO use 2 LTFs,
+ * instead of 2 LTFs for NSTS=2 use 4 LTFs */
+#define RATE_MCS_EHT_EXTRA_LTF_POS 27
+#define RATE_MCS_EHT_EXTRA_LTF_MSK (1 << RATE_MCS_EHT_EXTRA_LTF_POS)
+
+/* Bit 31-28: reserved */
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS 0
+#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS 1
+#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
+ LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+ LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
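
For illustration only (not part of the patch), here is how the color macros are meant to compose when a new LQ command is issued; the helper name is hypothetical.

/* Illustrative only: advance the LQ command color carried in the flags field */
static inline u8 iwl_example_advance_lq_color(u8 flags)
{
        u8 color = LQ_FLAG_COLOR_GET(flags);

        /* LQ_FLAGS_COLOR_INC() returns the next color already shifted into place */
        return LQ_FLAG_COLOR_SET(flags, LQ_FLAGS_COLOR_INC(color));
}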
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define LQ_FLAG_RTS_BW_SIG_POS 4
+#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with a narrower BW than requested in rates
+ */
+#define LQ_FLAG_DYNAMIC_BW_POS 6
+#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
+
+/* Single Stream Tx Parameters (lq_cmd->ss_params)
+ * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
+ * used for single stream Tx.
+ */
+
+/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
+ * (0) - No STBC allowed
+ * (1) - 2x1 STBC allowed (HT/VHT)
+ * (2) - 4x2 STBC allowed (HT/VHT)
+ * (3) - 3x2 STBC allowed (HT only)
+ * All our chips have at most 2 antennas, so only (1) is valid for now.
+ */
+#define LQ_SS_STBC_ALLOWED_POS 0
+#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_POS)
+
+/* 2x1 STBC is allowed */
+#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)
+
+/* Bit 2: Beamformer (VHT only) is allowed */
+#define LQ_SS_BFER_ALLOWED_POS 2
+#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)
+
+/* Bit 3: Force BFER or STBC for testing
+ * If this is set:
+ * If BFER is allowed then force the ucode to choose BFER else
+ * If STBC is allowed then force the ucode to choose STBC over SISO
+ */
+#define LQ_SS_FORCE_POS 3
+#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)
+
+/* Bit 31: ss_params field is valid. Used for FW backward compatibility
+ * with other drivers which don't support the ss_params API yet
+ */
+#define LQ_SS_PARAMS_VALID_POS 31
+#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS)
+
+/**
+ * struct iwl_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @reduced_tpc: reduced transmit power control value
+ * @control: not used
+ * @flags: combination of LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ * and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ * Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ * value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ * If a frame has higher try-count, it should not be selected for
+ * starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ * 0: no limit
+ * 1: no aggregation (one frame per aggregation)
+ * 2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @reserved2: reserved
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
+ * @ss_params: single stream features. declare whether STBC or BFER are allowed.
+ */
+struct iwl_lq_cmd {
+ u8 sta_id;
+ u8 reduced_tpc;
+ __le16 control;
+ /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+ u8 flags;
+ u8 mimo_delim;
+ u8 single_stream_ant_msk;
+ u8 dual_stream_ant_msk;
+ u8 initial_rate_index[AC_NUM];
+ /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+ __le16 agg_time_limit;
+ u8 agg_disable_start_th;
+ u8 agg_frame_cnt_limit;
+ __le32 reserved2;
+ __le32 rs_table[LQ_MAX_RETRY_NUM];
+ __le32 ss_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
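
A hedged sketch (not part of the patch, helper name invented, assuming the usual kernel environment for memset() and cpu_to_le32()) of populating the command with a single rate repeated for every retry; real rate-scaling code builds a proper fallback ladder.

/* Illustrative only: minimal LQ command setup for one station */
static inline void iwl_example_init_lq(struct iwl_lq_cmd *lq, u8 sta_id,
                                       u32 rate_n_flags)
{
        int i;

        memset(lq, 0, sizeof(*lq));
        lq->sta_id = sta_id;
        /* same rate for all LQ_MAX_RETRY_NUM retries, just to show the layout */
        for (i = 0; i < LQ_MAX_RETRY_NUM; i++)
                lq->rs_table[i] = cpu_to_le32(rate_n_flags);
}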
+
+u8 iwl_fw_rate_idx_to_plcp(int idx);
+u32 iwl_new_rate_from_v1(u32 rate_v1);
+const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
+const char *iwl_rs_pretty_ant(u8 ant);
+const char *iwl_rs_pretty_bw(int bw);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+bool iwl_he_is_sgi(u32 rate_n_flags);
+
+#endif /* __iwl_fw_api_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
new file mode 100644
index 000000000..74a018887
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -0,0 +1,849 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_rx_h__
+#define __iwl_fw_api_rx_h__
+
+/* API for pre-9000 hardware */
+
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
+enum iwl_mac_context_info {
+ MAC_CONTEXT_INFO_NONE,
+ MAC_CONTEXT_INFO_GSCAN,
+};
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1: reserved
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ * @mac_context_info: additional info on the context in which the frame was
+ * received as defined in &enum iwl_mac_context_info
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+ u8 non_cfg_phy_cnt;
+ u8 cfg_phy_cnt;
+ u8 stat_id;
+ u8 reserved1;
+ __le32 system_timestamp;
+ __le64 timestamp;
+ __le32 beacon_time_stamp;
+ __le16 phy_flags;
+ __le16 channel;
+ __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+ __le32 rate_n_flags;
+ __le32 byte_count;
+ u8 mac_active_msk;
+ u8 mac_context_info;
+ __le16 frame_time;
+} __packed;
+
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+ CSUM_RXA_RESERVED_MASK = 0x000f,
+ CSUM_RXA_MICSIZE_MASK = 0x00f0,
+ CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+ CSUM_RXA_PADD = BIT(13),
+ CSUM_RXA_AMSDU = BIT(14),
+ CSUM_RXA_ENA = BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @byte_count: byte count of the frame
+ * @assist: see &enum iwl_csum_rx_assist_info
+ */
+struct iwl_rx_mpdu_res_start {
+ __le16 byte_count;
+ __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
+
+/**
+ * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+ RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
+ RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
+ RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
+ RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
+ RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
+ RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
+ RX_RES_PHY_FLAGS_AGG = BIT(7),
+ RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
+ RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
+ RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found
+ * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension
+ * algorithm
+ * @RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: this frame is protected using
+ * CMAC or GMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
+ * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
+ */
+enum iwl_mvm_rx_status {
+ RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
+ RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
+ RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
+ RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
+ RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
+ RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
+ RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+ RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
+ RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+ RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+ RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+ RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
+ RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC = (6 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+ RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+ RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
+ RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
+ RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
+ RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
+ RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
+};
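
To illustrate (not part of the patch), a small hypothetical helper pulling the station ID and the encryption-type field out of this status word.

/* Illustrative only: decode the pre-9000 Rx status word */
static inline void iwl_example_parse_rx_status(__le32 status_le,
                                               u8 *sta_id, u32 *enc_type)
{
        u32 status = le32_to_cpu(status_le);

        *sta_id = (status & RX_MPDU_RES_STATUS_STA_ID_MSK) >>
                  RX_MDPU_RES_STATUS_STA_ID_SHIFT;
        *enc_type = status & RX_MPDU_RES_STATUS_SEC_ENC_MSK;
}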
+
+/* 9000 series API */
+enum iwl_rx_mpdu_mac_flags1 {
+ IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0,
+ /* shift should be 4, but the length is measured in 2-byte
+ * words, so shifting only by 3 gives a byte result
+ */
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3,
+};
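
The shift-by-3 trick described in the comment above is easiest to see in code; a hypothetical one-liner (not part of the patch):

/* Illustrative only: the field counts 2-byte words, so shifting by 3
 * instead of 4 yields the MIC/CRC length directly in bytes
 */
static inline unsigned int iwl_example_mic_crc_len(u8 mac_flags1)
{
        return (mac_flags1 & IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) >>
               IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT;
}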
+
+enum iwl_rx_mpdu_mac_flags2 {
+ /* in 2-byte words */
+ IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f,
+ IWL_RX_MPDU_MFLG2_PAD = 0x20,
+ IWL_RX_MPDU_MFLG2_AMSDU = 0x40,
+};
+
+enum iwl_rx_mpdu_amsdu_info {
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f,
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
+};
+
+#define RX_MPDU_BAND_POS 6
+#define RX_MPDU_BAND_MASK 0xC0
+#define BAND_IN_RX_STATUS(_val) \
+ (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
+
+enum iwl_rx_l3_proto_values {
+ IWL_RX_L3_TYPE_NONE,
+ IWL_RX_L3_TYPE_IPV4,
+ IWL_RX_L3_TYPE_IPV4_FRAG,
+ IWL_RX_L3_TYPE_IPV6_FRAG,
+ IWL_RX_L3_TYPE_IPV6,
+ IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+ IWL_RX_L3_TYPE_ARP,
+ IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
+enum iwl_rx_l3l4_flags {
+ IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
+ IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
+ IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
+ IWL_RX_L3L4_TCP_ACK = BIT(3),
+ IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
+ IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
+ IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
+};
+
+enum iwl_rx_mpdu_status {
+ IWL_RX_MPDU_STATUS_CRC_OK = BIT(0),
+ IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
+ IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
+ IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
+ IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
+ IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
+ IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+ /* overlaid since IWL_UCODE_TLV_API_DEPRECATE_TTAK */
+ IWL_RX_MPDU_STATUS_REPLAY_ERROR = BIT(7),
+ IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
+ IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK,
+ IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,
+ IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8,
+ IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8,
+ IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8,
+ IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8,
+ IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8,
+ IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11),
+ IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
+
+ IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22),
+
+ IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000,
+};
+
+#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+enum iwl_rx_mpdu_reorder_data {
+ IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff,
+ IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000,
+ IWL_RX_MPDU_REORDER_SN_SHIFT = 12,
+ IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000,
+ IWL_RX_MPDU_REORDER_BAID_SHIFT = 24,
+ IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000,
+};
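
A minimal sketch (not part of the patch, helper name invented) of splitting the reorder word into its NSSN, SN and BAID components using the masks and shifts above.

/* Illustrative only: unpack the reorder_data word */
static inline void iwl_example_parse_reorder(__le32 reorder_le, u16 *nssn,
                                             u16 *sn, u8 *baid)
{
        u32 reorder = le32_to_cpu(reorder_le);

        *nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
        *sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
              IWL_RX_MPDU_REORDER_SN_SHIFT;
        *baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;
}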
+
+enum iwl_rx_mpdu_phy_info {
+ IWL_RX_MPDU_PHY_AMPDU = BIT(5),
+ IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
+ IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
+ /* short preamble is only for CCK, for non-CCK overridden by this */
+ IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7),
+ IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
+};
+
+enum iwl_rx_mpdu_mac_info {
+ IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f,
+ IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
+};
+
+/* TSF overload low dword */
+enum iwl_rx_phy_data0 {
+ /* info type: HE any */
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
+ IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00,
+ /* 1 bit reserved */
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000,
+ IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000,
+ /* 6 bits reserved */
+ IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
+};
+
+enum iwl_rx_phy_info_type {
+ IWL_RX_PHY_INFO_TYPE_NONE = 0,
+ IWL_RX_PHY_INFO_TYPE_CCK = 1,
+ IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2,
+ IWL_RX_PHY_INFO_TYPE_HT = 3,
+ IWL_RX_PHY_INFO_TYPE_VHT_SU = 4,
+ IWL_RX_PHY_INFO_TYPE_VHT_MU = 5,
+ IWL_RX_PHY_INFO_TYPE_HE_SU = 6,
+ IWL_RX_PHY_INFO_TYPE_HE_MU = 7,
+ IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
+ IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
+ IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
+};
+
+/* TSF overload high dword */
+enum iwl_rx_phy_data1 {
+ /*
+ * check this first - if TSF overload is set,
+ * see &enum iwl_rx_phy_info_type
+ */
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
+
+ /* info type: HT/VHT/HE any */
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
+
+ /* info type: HE MU/MU-EXT */
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e,
+
+ /* info type: HE any */
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0,
+ IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100,
+ /* trigger encoded */
+ IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00,
+
+ /* info type: HE TB/TX-EXT */
+ IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001,
+ IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
+};
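
For illustration only (not part of the patch): the info type encoded here is meaningful only when the descriptor's phy_info has IWL_RX_MPDU_PHY_TSF_OVERLOAD set, roughly as in this hypothetical helper.

/* Illustrative only: recover the PHY info type, if any */
static inline enum iwl_rx_phy_info_type
iwl_example_phy_info_type(__le16 phy_info, __le32 phy_data1)
{
        if (!(le16_to_cpu(phy_info) & IWL_RX_MPDU_PHY_TSF_OVERLOAD))
                return IWL_RX_PHY_INFO_TYPE_NONE;

        /* IWL_RX_PHY_DATA1_INFO_TYPE_MASK covers bits 31-28 */
        return (le32_to_cpu(phy_data1) &
                IWL_RX_PHY_DATA1_INFO_TYPE_MASK) >> 28;
}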
+
+/* goes into Metadata DW 7 */
+enum iwl_rx_phy_data2 {
+ /* info type: HE MU-EXT */
+ /* the a1/a2/... is what the PHY/firmware calls the values */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */
+
+ /* info type: HE TB-EXT */
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000,
+};
+
+/* goes into Metadata DW 8 */
+enum iwl_rx_phy_data3 {
+ /* info type: HE MU-EXT */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */
+};
+
+/* goes into Metadata DW 4 high 16 bits */
+enum iwl_rx_phy_data4 {
+ /* info type: HE MU-EXT */
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
+ union {
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+
+ /**
+ * @phy_data2: depends on info type (see @phy_data1)
+ */
+ __le32 phy_data2;
+ };
+
+ /* DW8 - carries filter_match only when rpa_en == 1 */
+ union {
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+
+ /**
+ * @phy_data3: depends on info type (see @phy_data1)
+ */
+ __le32 phy_data3;
+ };
+
+ /* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW10 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW12 & DW13 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+
+ struct {
+ /**
+ * @phy_data0: depends on info_type, see @phy_data1
+ */
+ __le32 phy_data0;
+ /**
+ * @phy_data1: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data1.
+ */
+ __le32 phy_data1;
+ };
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_4 */
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+ /* DW7 - carries filter_match only when rpa_en == 1 */
+ union {
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+
+ /**
+ * @phy_data3: depends on info type (see @phy_data1)
+ */
+ __le32 phy_data3;
+ };
+
+ /* DW8 - carries rss_hash only when rpa_en == 1 */
+ union {
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+
+ /**
+ * @phy_data2: depends on info type (see @phy_data1)
+ */
+ __le32 phy_data2;
+ };
+ /* DW9 */
+ /**
+ * @partial_hash: 31:0 ip/tcp header hash
+ * w/o some fields (such as IP SRC addr)
+ */
+ __le32 partial_hash;
+ /* DW10 */
+ /**
+ * @raw_xsum: raw xsum value
+ */
+ __be16 raw_xsum;
+ /**
+ * @reserved_xsum: reserved high bits in the raw checksum
+ */
+ __le16 reserved_xsum;
+ /* DW11 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW12 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW13 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW14 & DW15 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+
+ struct {
+ /**
+ * @phy_data0: depends on info_type, see @phy_data1
+ */
+ __le32 phy_data0;
+ /**
+ * @phy_data1: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data1.
+ */
+ __le32 phy_data1;
+ };
+ };
+ /* DW16 & DW17 */
+ /**
+ * @reserved: reserved
+ */
+ __le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_5 */
+
+/**
+ * struct iwl_rx_mpdu_desc - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc {
+ /* DW2 */
+ /**
+ * @mpdu_len: MPDU length
+ */
+ __le16 mpdu_len;
+ /**
+ * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
+ */
+ u8 mac_flags1;
+ /**
+ * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
+ */
+ u8 mac_flags2;
+ /* DW3 */
+ /**
+ * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
+ */
+ u8 amsdu_info;
+ /**
+ * @phy_info: &enum iwl_rx_mpdu_phy_info
+ */
+ __le16 phy_info;
+ /**
+ * @mac_phy_idx: MAC/PHY index
+ */
+ u8 mac_phy_idx;
+ /* DW4 - carries csum data only when rpa_en == 1 */
+ /**
+ * @raw_csum: raw checksum (allegedly unreliable)
+ */
+ __le16 raw_csum;
+
+ union {
+ /**
+ * @l3l4_flags: &enum iwl_rx_l3l4_flags
+ */
+ __le16 l3l4_flags;
+
+ /**
+ * @phy_data4: depends on info type, see phy_data1
+ */
+ __le16 phy_data4;
+ };
+ /* DW5 */
+ /**
+ * @status: &enum iwl_rx_mpdu_status
+ */
+ __le32 status;
+
+ /* DW6 */
+ /**
+ * @reorder_data: &enum iwl_rx_mpdu_reorder_data
+ */
+ __le32 reorder_data;
+
+ union {
+ struct iwl_rx_mpdu_desc_v1 v1;
+ struct iwl_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_4,
+ RX_MPDU_RES_START_API_S_VER_5 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
+
+#define RX_NO_DATA_CHAIN_A_POS 0
+#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS)
+#define RX_NO_DATA_CHAIN_B_POS 8
+#define RX_NO_DATA_CHAIN_B_MSK (0xff << RX_NO_DATA_CHAIN_B_POS)
+#define RX_NO_DATA_CHANNEL_POS 16
+#define RX_NO_DATA_CHANNEL_MSK (0xff << RX_NO_DATA_CHANNEL_POS)
+
+#define RX_NO_DATA_INFO_TYPE_POS 0
+#define RX_NO_DATA_INFO_TYPE_MSK (0xff << RX_NO_DATA_INFO_TYPE_POS)
+#define RX_NO_DATA_INFO_TYPE_NONE 0
+#define RX_NO_DATA_INFO_TYPE_RX_ERR 1
+#define RX_NO_DATA_INFO_TYPE_NDP 2
+#define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3
+#define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4
+
+#define RX_NO_DATA_INFO_ERR_POS 8
+#define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS)
+#define RX_NO_DATA_INFO_ERR_NONE 0
+#define RX_NO_DATA_INFO_ERR_BAD_PLCP 1
+#define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2
+#define RX_NO_DATA_INFO_ERR_NO_DELIM 3
+#define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4
+
+#define RX_NO_DATA_FRAME_TIME_POS 0
+#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
+
+#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
+#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
+
+/**
+ * struct iwl_rx_no_data - RX no data descriptor
+ * @info: 7:0 frame type, 15:8 RX error type
+ * @rssi: 7:0 energy chain-A,
+ * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel
+ * @on_air_rise_time: GP2 during on air rise
+ * @fr_time: frame time
+ * @rate: rate/mcs of frame
+ * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type
+ * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
+ * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
+ * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
+ */
+struct iwl_rx_no_data {
+ __le32 info;
+ __le32 rssi;
+ __le32 on_air_rise_time;
+ __le32 fr_time;
+ __le32 rate;
+ __le32 phy_info[2];
+ __le32 rx_vec[2];
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
+ RX_NO_DATA_NTFY_API_S_VER_2 */
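
As a hypothetical illustration (not part of the patch), extracting the frame type from @info, for example to recognize an NDP notification.

/* Illustrative only: does this no-data notification describe an NDP? */
static inline bool iwl_example_no_data_is_ndp(const struct iwl_rx_no_data *nd)
{
        u32 info = le32_to_cpu(nd->info);

        return ((info & RX_NO_DATA_INFO_TYPE_MSK) >>
                RX_NO_DATA_INFO_TYPE_POS) == RX_NO_DATA_INFO_TYPE_NDP;
}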
+
+struct iwl_frame_release {
+ u8 baid;
+ u8 reserved;
+ __le16 nssn;
+};
+
+/**
+ * enum iwl_bar_frame_release_sta_tid - STA/TID information for BAR release
+ * @IWL_BAR_FRAME_RELEASE_TID_MASK: TID mask
+ * @IWL_BAR_FRAME_RELEASE_STA_MASK: STA mask
+ */
+enum iwl_bar_frame_release_sta_tid {
+ IWL_BAR_FRAME_RELEASE_TID_MASK = 0x0000000f,
+ IWL_BAR_FRAME_RELEASE_STA_MASK = 0x000001f0,
+};
+
+/**
+ * enum iwl_bar_frame_release_ba_info - BA information for BAR release
+ * @IWL_BAR_FRAME_RELEASE_NSSN_MASK: NSSN mask
+ * @IWL_BAR_FRAME_RELEASE_SN_MASK: SN mask (ignored by driver)
+ * @IWL_BAR_FRAME_RELEASE_BAID_MASK: BAID mask
+ */
+enum iwl_bar_frame_release_ba_info {
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK = 0x00000fff,
+ IWL_BAR_FRAME_RELEASE_SN_MASK = 0x00fff000,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK = 0x3f000000,
+};
+
+/**
+ * struct iwl_bar_frame_release - frame release from BAR info
+ * @sta_tid: STA & TID information, see &enum iwl_bar_frame_release_sta_tid.
+ * @ba_info: BA information, see &enum iwl_bar_frame_release_ba_info.
+ */
+struct iwl_bar_frame_release {
+ __le32 sta_tid;
+ __le32 ba_info;
+} __packed; /* RX_BAR_TO_FRAME_RELEASE_API_S_VER_1 */
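
A hedged sketch (not part of the patch, helper name invented) of unpacking both words with the masks defined above; the shift amounts follow directly from those masks.

/* Illustrative only: decode a BAR-triggered frame release */
static inline void iwl_example_parse_bar_release(const struct iwl_bar_frame_release *rel,
                                                 u8 *baid, u16 *nssn,
                                                 u8 *sta_id, u8 *tid)
{
        u32 ba_info = le32_to_cpu(rel->ba_info);
        u32 sta_tid = le32_to_cpu(rel->sta_tid);

        *nssn = ba_info & IWL_BAR_FRAME_RELEASE_NSSN_MASK;
        *baid = (ba_info & IWL_BAR_FRAME_RELEASE_BAID_MASK) >> 24;
        *tid = sta_tid & IWL_BAR_FRAME_RELEASE_TID_MASK;
        *sta_id = (sta_tid & IWL_BAR_FRAME_RELEASE_STA_MASK) >> 4;
}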
+
+enum iwl_rss_hash_func_en {
+ IWL_RSS_HASH_TYPE_IPV4_TCP,
+ IWL_RSS_HASH_TYPE_IPV4_UDP,
+ IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+ IWL_RSS_HASH_TYPE_IPV6_TCP,
+ IWL_RSS_HASH_TYPE_IPV6_UDP,
+ IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+};
+
+#define IWL_RSS_HASH_KEY_CNT 10
+#define IWL_RSS_INDIRECTION_TABLE_SIZE 128
+#define IWL_RSS_ENABLE 1
+
+/**
+ * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration
+ *
+ * @flags: 1 - enable, 0 - disable
+ * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en
+ * @reserved: reserved
+ * @secret_key: 320 bit input of random key configuration from driver
+ * @indirection_table: indirection table
+ */
+struct iwl_rss_config_cmd {
+ __le32 flags;
+ u8 hash_mask;
+ u8 reserved[3];
+ __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+ u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
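
For illustration only (not part of the patch): hash_mask is a bitmap indexed by enum iwl_rss_hash_func_en and the indirection table spreads flows across the RX queues. A real driver would also fill secret_key with random data (for example via get_random_bytes()); the helper below is hypothetical and assumes num_rx_queues is non-zero.

/* Illustrative only: enable RSS for TCP flows over num_rx_queues queues */
static inline void iwl_example_fill_rss(struct iwl_rss_config_cmd *cmd,
                                        u8 num_rx_queues)
{
        int i;

        cmd->flags = cpu_to_le32(IWL_RSS_ENABLE);
        cmd->hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
                         BIT(IWL_RSS_HASH_TYPE_IPV6_TCP);
        for (i = 0; i < IWL_RSS_INDIRECTION_TABLE_SIZE; i++)
                cmd->indirection_table[i] = i % num_rx_queues;
}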
+
+#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
+#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
+
+/**
+ * struct iwl_rxq_sync_cmd - RXQ notification trigger
+ *
+ * @flags: flags of the notification. bit 0:3 are the sender queue
+ * @rxq_mask: rx queues to send the notification on
+ * @count: number of bytes in payload, should be DWORD aligned
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_cmd {
+ __le32 flags;
+ __le32 rxq_mask;
+ __le32 count;
+ u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rxq_sync_notification - Notification triggered by RXQ
+ * sync command
+ *
+ * @count: number of bytes in payload
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_notification {
+ __le32 count;
+ u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_pm_event - type of station PM event
+ * @IWL_MVM_PM_EVENT_AWAKE: station woke up
+ * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep
+ * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger
+ * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll
+ */
+enum iwl_mvm_pm_event {
+ IWL_MVM_PM_EVENT_AWAKE,
+ IWL_MVM_PM_EVENT_ASLEEP,
+ IWL_MVM_PM_EVENT_UAPSD,
+ IWL_MVM_PM_EVENT_PS_POLL,
+}; /* PEER_PM_NTFY_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_pm_state_notification - station PM state notification
+ * @sta_id: station ID of the station changing state
+ * @type: the new powersave state, see &enum iwl_mvm_pm_event
+ */
+struct iwl_mvm_pm_state_notification {
+ u8 sta_id;
+ u8 type;
+ /* private: */
+ __le16 reserved;
+} __packed; /* PEER_PM_NTFY_API_S_VER_1 */
+
+#define BA_WINDOW_STREAMS_MAX 16
+#define BA_WINDOW_STATUS_TID_MSK 0x000F
+#define BA_WINDOW_STATUS_STA_ID_POS 4
+#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0
+#define BA_WINDOW_STATUS_VALID_MSK BIT(9)
+
+/**
+ * struct iwl_ba_window_status_notif - reordering window's status notification
+ * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
+ * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
+ * @start_seq_num: the start sequence number of the bitmap
+ * @mpdu_rx_count: the number of received MPDUs since entering D0i3
+ */
+struct iwl_ba_window_status_notif {
+ __le64 bitmap[BA_WINDOW_STREAMS_MAX];
+ __le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+ __le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+ __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_data - per-queue RX queue configuration
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of freeRB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+ u8 q_num;
+ u8 enable;
+ __le16 reserved;
+ __le64 urbd_stts_wrptr;
+ __le64 fr_bd_cb;
+ __le64 ur_bd_cb;
+ __le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+ u8 num_queues;
+ u8 reserved[3];
+ struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
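
A minimal sketch (not part of the patch) of allocating this variable-length command with the standard kernel helpers kzalloc() and struct_size(); the function name is invented.

/* Illustrative only: allocate an RFH queue config for @num_queues queues */
static inline struct iwl_rfh_queue_config *
iwl_example_alloc_rfh_config(u8 num_queues)
{
        struct iwl_rfh_queue_config *cmd;

        cmd = kzalloc(struct_size(cmd, data, num_queues), GFP_KERNEL);
        if (cmd)
                cmd->num_queues = num_queues;

        return cmd;
}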
+
+#endif /* __iwl_fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
new file mode 100644
index 000000000..7ba0e3409
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -0,0 +1,1249 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_scan_h__
+#define __iwl_fw_api_scan_h__
+
+/* Scan Commands, Responses, Notifications */
+
+/**
+ * enum iwl_scan_subcmd_ids - scan commands
+ */
+enum iwl_scan_subcmd_ids {
+ /**
+ * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
+ */
+ OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
+};
+
+/* Max number of IEs for direct SSID scans in a command */
+#define PROBE_OPTION_MAX 20
+
+#define SCAN_SHORT_SSID_MAX_SIZE 8
+#define SCAN_BSSID_MAX_SIZE 16
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ *
+ * @id: element ID
+ * @len: element length
+ * @ssid: element (SSID) data
+ */
+struct iwl_ssid_ie {
+ u8 id;
+ u8 len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/* scan offload */
+#define IWL_SCAN_MAX_BLACKLIST_LEN 64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWL_SCAN_MAX_PROFILES 11
+#define IWL_SCAN_MAX_PROFILES_V2 8
+#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+#define SCAN_NUM_BAND_PROBE_DATA_V_1 2
+#define SCAN_NUM_BAND_PROBE_DATA_V_2 3
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define CAN_ABORT_STATUS 1
+
+#define IWL_FULL_SCAN_MULTIPLIER 5
+#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWL_MAX_SCHED_SCAN_PLANS 2
+
+enum scan_framework_client {
+ SCAN_CLIENT_SCHED_SCAN = BIT(0),
+ SCAN_CLIENT_NETDETECT = BIT(1),
+ SCAN_CLIENT_ASSET_TRACKING = BIT(2),
+};
+
+/**
+ * struct iwl_scan_offload_blocklist - SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address to filter out
+ * @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
+ */
+struct iwl_scan_offload_blocklist {
+ u8 ssid[ETH_ALEN];
+ u8 reported_rssi;
+ u8 client_bitmap;
+} __packed;
+
+enum iwl_scan_offload_network_type {
+ IWL_NETWORK_TYPE_BSS = 1,
+ IWL_NETWORK_TYPE_IBSS = 2,
+ IWL_NETWORK_TYPE_ANY = 3,
+};
+
+enum iwl_scan_offload_band_selection {
+ IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
+ IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
+ IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
+};
+
+enum iwl_scan_offload_auth_alg {
+ IWL_AUTH_ALGO_UNSUPPORTED = 0x00,
+ IWL_AUTH_ALGO_NONE = 0x01,
+ IWL_AUTH_ALGO_PSK = 0x02,
+ IWL_AUTH_ALGO_8021X = 0x04,
+ IWL_AUTH_ALGO_SAE = 0x08,
+ IWL_AUTH_ALGO_8021X_SHA384 = 0x10,
+ IWL_AUTH_ALGO_OWE = 0x20,
+};
+
+/**
+ * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @auth_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwl_scan_offload_network_type
+ * @band_selection: enum iwl_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
+ * @reserved: reserved
+ */
+struct iwl_scan_offload_profile {
+ u8 ssid_index;
+ u8 unicast_cipher;
+ u8 auth_alg;
+ u8 network_type;
+ u8 band_selection;
+ u8 client_bitmap;
+ u8 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_scan_offload_profile_cfg_data
+ * @blocklist_len: length of blocklist
+ * @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
+ * @reserved: reserved
+ */
+struct iwl_scan_offload_profile_cfg_data {
+ u8 blocklist_len;
+ u8 num_profiles;
+ u8 match_notify;
+ u8 pass_match;
+ u8 active_clients;
+ u8 any_beacon_notify;
+ u8 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_scan_offload_profile_cfg_v1
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg_v1 {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1-2*/
+
+/**
+ * struct iwl_scan_offload_profile_cfg
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES_V2];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_3*/
+
+/**
+ * struct iwl_scan_schedule_lmac - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwl_scan_schedule_lmac {
+ __le16 delay;
+ u8 iterations;
+ u8 full_scan_mul;
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+enum iwl_scan_offload_complete_status {
+ IWL_SCAN_OFFLOAD_COMPLETED = 1,
+ IWL_SCAN_OFFLOAD_ABORTED = 2,
+};
+
+enum iwl_scan_ebs_status {
+ IWL_SCAN_EBS_SUCCESS,
+ IWL_SCAN_EBS_FAILED,
+ IWL_SCAN_EBS_CHAN_NOT_FOUND,
+ IWL_SCAN_EBS_INACTIVE,
+};
+
+/**
+ * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_req_tx_cmd {
+ __le32 tx_flags;
+ __le32 rate_n_flags;
+ u8 sta_id;
+ u8 reserved[3];
+} __packed;
+
+enum iwl_scan_channel_flags_lmac {
+ IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27),
+ IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28),
+};
+
+/**
+ * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags: bits 1-20: directed scan to i'th ssid
+ * other bits &enum iwl_scan_channel_flags_lmac
+ * @channel_num: channel number 1-13 etc
+ * @iter_count: scan iteration on this channel
+ * @iter_interval: interval in seconds between iterations on one channel
+ */
+struct iwl_scan_channel_cfg_lmac {
+ __le32 flags;
+ __le16 channel_num;
+ __le16 iter_count;
+ __le32 iter_interval;
+} __packed;
+
+/**
+ * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwl_scan_probe_segment {
+ __le16 offset;
+ __le16 len;
+} __packed;
+
+/**
+ * struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwl_scan_probe_req_v1 {
+ struct iwl_scan_probe_segment mac_header;
+ struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_1];
+ struct iwl_scan_probe_segment common_data;
+ u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+/**
+ * struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwl_scan_probe_req {
+ struct iwl_scan_probe_segment mac_header;
+ struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_2];
+ struct iwl_scan_probe_segment common_data;
+ u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+enum iwl_scan_channel_flags {
+ IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
+ IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
+ IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = BIT(4),
+ IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = BIT(5),
+ IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
+};
+
+/**
+ * struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be a full scan (and so on).
+ */
+struct iwl_scan_channel_opt {
+ __le16 flags;
+ __le16 non_ebs_ratio;
+} __packed;
+
+/**
+ * enum iwl_mvm_lmac_scan_flags - LMAC scan flags
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
+ * without filtering.
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
+ * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
+ * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
+ * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
+ * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
+ * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
+ * and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels
+ * 1, 6 and 11.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
+ */
+enum iwl_mvm_lmac_scan_flags {
+ IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
+ IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1),
+ IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2),
+ IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
+ IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
+ IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
+ IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+ IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7),
+ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
+};
+
+enum iwl_scan_priority {
+ IWL_SCAN_PRIORITY_LOW,
+ IWL_SCAN_PRIORITY_MEDIUM,
+ IWL_SCAN_PRIORITY_HIGH,
+};
+
+enum iwl_scan_priority_ext {
+ IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+ IWL_SCAN_PRIORITY_EXT_1,
+ IWL_SCAN_PRIORITY_EXT_2,
+ IWL_SCAN_PRIORITY_EXT_3,
+ IWL_SCAN_PRIORITY_EXT_4,
+ IWL_SCAN_PRIORITY_EXT_5,
+ IWL_SCAN_PRIORITY_EXT_6,
+ IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
+/**
+ * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * @reserved1: for alignment and future use
+ * @n_channels: num of channels to scan
+ * @active_dwell: dwell time for active channels
+ * @passive_dwell: dwell time for passive channels
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases)
+ * @reserved2: for alignment and future use
+ * @rx_chain_select: PHY_RX_CHAIN_* flags
+ * @scan_flags: &enum iwl_mvm_lmac_scan_flags
+ * @max_out_time: max time (in TU) to be out of associated channel
+ * @suspend_time: pause scan this long (TUs) when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_prio: enum iwl_scan_priority
+ * @iter_num: number of scan iterations
+ * @delay: delay in seconds before first iteration
+ * @schedule: two scheduling plans. The first one is finite, the second one can
+ * be infinite.
+ * @channel_opt: channel optimization options, for full and partial scan
+ * @data: channel configuration and probe request packet.
+ */
+struct iwl_scan_req_lmac {
+ /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
+ __le32 reserved1;
+ u8 n_channels;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 extended_dwell;
+ u8 reserved2;
+ __le16 rx_chain_select;
+ __le32 scan_flags;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ __le32 flags;
+ __le32 filter_flags;
+ struct iwl_scan_req_tx_cmd tx_cmd[2];
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 scan_prio;
+ /* SCAN_REQ_PERIODIC_PARAMS_API_S */
+ __le32 iter_num;
+ __le32 delay;
+ struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ struct iwl_scan_channel_opt channel_opt[2];
+ u8 data[];
+} __packed;
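
A hedged sketch (not part of the patch) of how the trailing data[] is typically sized: the per-channel configuration array followed by a probe request template. Whether the v1 or v2 probe layout applies depends on the firmware, so the choice below is only an assumption, and the helper name is hypothetical.

/* Illustrative only: rough size of an LMAC scan request for n_channels */
static inline size_t iwl_example_lmac_scan_size(u8 n_channels)
{
        return sizeof(struct iwl_scan_req_lmac) +
               n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
               sizeof(struct iwl_scan_probe_req_v1);
}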
+
+/**
+ * struct iwl_scan_results_notif - scan results for one channel -
+ * SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent due to lack of time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwl_scan_results_notif {
+ u8 channel;
+ u8 band;
+ u8 probe_status;
+ u8 num_probe_not_sent;
+ __le32 duration;
+} __packed;
+
+/**
+ * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels)
+ * SCAN_COMPLETE_NTF_API_S_VER_3
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: an array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_lmac_scan_complete_notif {
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+ struct iwl_scan_results_notif results[];
+} __packed;
+
+/**
+ * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: &enum iwl_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwl_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ * @reserved: reserved
+ */
+struct iwl_periodic_scan_complete {
+ u8 last_schedule_line;
+ u8 last_schedule_iteration;
+ u8 status;
+ u8 ebs_status;
+ __le32 time_after_last_iter;
+ __le32 reserved;
+} __packed;
+
+/* UMAC Scan API */
+
+/* The maximum of either of these cannot exceed 8, because we use an
+ * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 4
+#define IWL_MVM_MAX_LMAC_SCANS 1
+
+enum scan_config_flags {
+ SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
+ SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
+ SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
+ SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
+ SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
+ SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
+ SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
+ SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),
+
+ /* Bits 26-31 are for num of channels in channel_array */
+#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+};
+
+enum scan_config_rates {
+ /* OFDM basic rates */
+ SCAN_CONFIG_RATE_6M = BIT(0),
+ SCAN_CONFIG_RATE_9M = BIT(1),
+ SCAN_CONFIG_RATE_12M = BIT(2),
+ SCAN_CONFIG_RATE_18M = BIT(3),
+ SCAN_CONFIG_RATE_24M = BIT(4),
+ SCAN_CONFIG_RATE_36M = BIT(5),
+ SCAN_CONFIG_RATE_48M = BIT(6),
+ SCAN_CONFIG_RATE_54M = BIT(7),
+ /* CCK basic rates */
+ SCAN_CONFIG_RATE_1M = BIT(8),
+ SCAN_CONFIG_RATE_2M = BIT(9),
+ SCAN_CONFIG_RATE_5M = BIT(10),
+ SCAN_CONFIG_RATE_11M = BIT(11),
+
+ /* Bits 16-27 are for supported rates */
+#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+};
+
+enum iwl_channel_flags {
+ IWL_CHANNEL_FLAG_EBS = BIT(0),
+ IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
+ IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
+};
+
+enum iwl_uhb_chan_cfg_flags {
+ IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES = BIT(24),
+ IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN = BIT(25),
+ IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE = BIT(26),
+};
+/**
+ * struct iwl_scan_dwell
+ * @active: default dwell time for active scan
+ * @passive: default dwell time for passive scan
+ * @fragmented: default dwell time for fragmented scan
+ * @extended: default dwell time for channels 1, 6 and 11
+ */
+struct iwl_scan_dwell {
+ u8 active;
+ u8 passive;
+ u8 fragmented;
+ u8 extended;
+} __packed;
+
+/**
+ * struct iwl_scan_config_v1 - scan configuration command
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell: dwells for the scan
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwl_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwl_scan_config_v1 {
+ __le32 flags;
+ __le32 tx_chains;
+ __le32 rx_chains;
+ __le32 legacy_rates;
+ __le32 out_of_channel_time;
+ __le32 suspend_time;
+ struct iwl_scan_dwell dwell;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_sta_id;
+ u8 channel_flags;
+ u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
+
+#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
+
+/**
+ * struct iwl_scan_config_v2 - scan configuration command
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell: dwells for the scan
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwl_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwl_scan_config_v2 {
+ __le32 flags;
+ __le32 tx_chains;
+ __le32 rx_chains;
+ __le32 legacy_rates;
+ __le32 out_of_channel_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ struct iwl_scan_dwell dwell;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_sta_id;
+ u8 channel_flags;
+ u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config - scan configuration command
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw. Deprecated starting with
+ * API version 5.
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+ u8 enable_cam_mode;
+ u8 enable_promiscouos_mode;
+ u8 bcast_sta_id;
+ u8 reserved;
+ __le32 tx_chains;
+ __le32 rx_chains;
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */
+
+/**
+ * enum iwl_umac_scan_flags - UMAC scan flags
+ * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
+ * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+enum iwl_umac_scan_flags {
+ IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
+ IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
+};
+
+enum iwl_umac_scan_uid_offsets {
+ IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
+ IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
+};
+
+enum iwl_umac_scan_general_flags {
+ IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
+ /* Extended dwell is obsolete when adaptive dwell is used, making this
+ * bit reusable. Hence, probe request defer is used only when adaptive
+ * dwell is supported. */
+ IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
+ IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14),
+ IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15),
+};
+
+/**
+ * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ * notification per channel or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
+ */
+enum iwl_umac_scan_general_flags2 {
+ IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
+};
+
+/**
+ * enum iwl_umac_scan_general_flags_v2 - UMAC scan general flags version 2
+ *
+ * The FW flags were reordered and hence the driver introduces version 2
+ *
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH: does this scan check for profile matching
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels scanned
+ * as passive
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and
+ * 5.2GHz bands scan, trigger scan on 6GHz band to discover
+ * the reported collocated APs
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN: at the end of 2.4GHz and 5GHz
+ * bands scan, if no APs were discovered, allow the scan to continue and scan
+ * 6GHz PSC channels in order to discover country information.
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case
+ * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is
+ * activated over 6GHz PSC channels, filter in beacons and probe responses.
+ * @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests in a minimum
+ * rate of 5.5 Mbps, filter in broadcast probe responses and set the max
+ * channel time indication field in the FILS request parameters element
+ * (if included by the driver in the probe request IEs).
+ */
+enum iwl_umac_scan_general_flags_v2 {
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = BIT(9),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14),
+ IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15),
+};
+
+/**
+ * enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2
+ *
+ * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling
+ * should be aware of a P2P GO operation on the 2GHz band.
+ * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling
+ * should be aware of a P2P GO operation on the 5GHz or 6GHz band.
+ */
+enum iwl_umac_scan_general_params_flags2 {
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0),
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1),
+};
+
+/**
+ * struct iwl_scan_channel_cfg_umac
+ * @flags: bitmap - 0-19: directed scan to i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @band: band of channel: 0 for 2GHz, 1 for 5GHz
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwl_scan_channel_cfg_umac {
+ __le32 flags;
+ /* Both versions are of the same size, so use a union without adjusting
+ * the command size later
+ */
+ union {
+ struct {
+ u8 channel_num;
+ u8 iter_count;
+ __le16 iter_interval;
+ } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
+ struct {
+ u8 channel_num;
+ u8 band;
+ u8 iter_count;
+ u8 iter_interval;
+ } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
+ * SCAN_CHANNEL_CONFIG_API_S_VER_3
+ * SCAN_CHANNEL_CONFIG_API_S_VER_4
+ */
+ };
+} __packed;
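
For illustration only (not part of the patch), filling the v2 layout of one entry; ssid_bitmap selects which direct-scan SSIDs apply to this channel, and the helper name is hypothetical.

/* Illustrative only: fill one UMAC channel config entry (v2 layout) */
static inline void iwl_example_fill_umac_chan(struct iwl_scan_channel_cfg_umac *cfg,
                                              u8 channel_num, u8 band,
                                              u32 ssid_bitmap)
{
        cfg->flags = cpu_to_le32(ssid_bitmap);
        cfg->v2.channel_num = channel_num;
        cfg->v2.band = band;
        cfg->v2.iter_count = 1;
        cfg->v2.iter_interval = 0;
}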
+
+/**
+ * struct iwl_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_umac_schedule {
+ __le16 interval;
+ u8 iter_count;
+ u8 reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+struct iwl_scan_req_umac_tail_v1 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwl_scan_probe_req_v1 preq;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwl_scan_req_umac_tail_v2 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_2 */
+ struct iwl_scan_probe_req preq;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwl_scan_umac_chan_param
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ */
+struct iwl_scan_umac_chan_param {
+ u8 flags;
+ u8 count;
+ __le16 reserved;
+} __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_umac
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan per LMAC
+ * @passive_dwell: dwell time for passive scan per LMAC
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @general_flags2: &enum iwl_umac_scan_general_flags2
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ * are 2 LMACs
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @num_of_fragments: Number of fragments needed for full coverage per band.
+ * Relevant only for fragmented scan.
+ * @channel: &struct iwl_scan_umac_chan_param
+ * @reserved: for future use and alignment
+ * @reserved3: for future use and alignment
+ * @data: &struct iwl_scan_channel_cfg_umac and
+ *	&struct iwl_scan_req_umac_tail_v1 or &struct iwl_scan_req_umac_tail_v2
+ */
+struct iwl_scan_req_umac {
+ __le32 flags;
+ __le32 uid;
+ __le32 ooc_priority;
+ __le16 general_flags;
+ u8 reserved;
+ u8 scan_start_mac_id;
+ union {
+ struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ __le32 scan_priority;
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+ struct {
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 reserved2;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 general_flags2;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+ struct {
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_hb_n_aps;
+ u8 adwell_default_lb_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 general_flags2;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */
+ };
+} __packed;
+
+#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
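+
+/*
+ * Illustrative sketch only: the variable-length @data area of
+ * struct iwl_scan_req_umac holds the channel config entries followed by a
+ * tail, so a plausible way to size the host command for the fixed-size v7
+ * layout (assuming the v2 tail is in use) is shown below.  The helper name
+ * is hypothetical.
+ */
+static inline size_t iwl_example_scan_req_umac_v7_len(int n_channels)
+{
+	return IWL_SCAN_REQ_UMAC_SIZE_V7 +
+	       n_channels * sizeof(struct iwl_scan_channel_cfg_umac) +
+	       sizeof(struct iwl_scan_req_umac_tail_v2);
+}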
+
+/**
+ * struct iwl_scan_probe_params_v3
+ * @preq: scan probe request params
+ * @ssid_num: number of valid SSIDs in direct scan array
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid BSSIDs in the bssid array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v3 {
+ struct iwl_scan_probe_req preq;
+ u8 ssid_num;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ u8 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */
+
+/**
+ * struct iwl_scan_probe_params_v4
+ * @preq: scan probe request params
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid BSSIDs in the bssid array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwl_scan_probe_params_v4 {
+ struct iwl_scan_probe_req preq;
+ u8 short_ssid_num;
+ u8 bssid_num;
+ __le16 reserved;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
+ u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
+
+#define SCAN_MAX_NUM_CHANS_V3 67
+
+/**
+ * struct iwl_scan_channel_params_v4
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @num_of_aps_override: override the number of APs the FW uses to calculate
+ * dwell time when adaptive dwell is used
+ * @reserved: for future use and alignment
+ * @channel_config: array of explicit channel configurations
+ *	for 2.4 GHz and 5.2 GHz bands
+ * @adwell_ch_override_bitmap: when using adaptive dwell, override the number
+ * of APs value with &num_of_aps_override for the channel.
+ * To cast channel to index, use &iwl_mvm_scan_ch_and_band_to_idx
+ */
+struct iwl_scan_channel_params_v4 {
+ u8 flags;
+ u8 count;
+ u8 num_of_aps_override;
+ u8 reserved;
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+ u8 adwell_ch_override_bitmap[16];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also
+ SCAN_CHANNEL_PARAMS_API_S_VER_5 */
+
+/**
+ * struct iwl_scan_channel_params_v6
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @n_aps_override: override the number of APs the FW uses to calculate dwell
+ * time when adaptive dwell is used.
+ * Channel k will use n_aps_override[i] when BIT(20 + i) is set in
+ * channel_config[k].flags
+ * @channel_config: array of explicit channel configurations
+ *	for 2.4 GHz and 5.2 GHz bands
+ */
+struct iwl_scan_channel_params_v6 {
+ u8 flags;
+ u8 count;
+ u8 n_aps_override[2];
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
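+
+/*
+ * Illustrative sketch only: marking channel @k so that the firmware applies
+ * n_aps_override[i] to it, per the BIT(20 + i) convention documented above.
+ * The helper name is hypothetical.
+ */
+static inline void
+iwl_example_set_n_aps_override(struct iwl_scan_channel_params_v6 *params,
+			       int k, int i)
+{
+	params->channel_config[k].flags |= cpu_to_le32(BIT(20 + i));
+}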
+
+/**
+ * struct iwl_scan_general_params_v11
+ * @flags: &enum iwl_umac_scan_general_flags_v2
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2.
+ * Otherwise reserved.
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channel
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwl_scan_general_params_v11 {
+ __le16 flags;
+ u8 reserved;
+ u8 scan_start_mac_id;
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_2g;
+ u8 adwell_default_5g;
+ u8 adwell_default_social_chn;
+ u8 flags2;
+ __le16 adwell_max_budget;
+ __le32 max_out_of_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */
+
+/**
+ * struct iwl_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameters
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwl_scan_periodic_parms_v1 {
+ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+ __le16 delay;
+ __le16 reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_params_v12
+ * @general_params: &struct iwl_scan_general_params_v11
+ * @channel_params: &struct iwl_scan_channel_params_v4
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v3
+ */
+struct iwl_scan_req_params_v12 {
+ struct iwl_scan_general_params_v11 general_params;
+ struct iwl_scan_channel_params_v4 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v3 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_params_v15
+ * @general_params: &struct iwl_scan_general_params_v11
+ * @channel_params: &struct iwl_scan_channel_params_v6
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v4
+ */
+struct iwl_scan_req_params_v15 {
+ struct iwl_scan_general_params_v11 general_params;
+ struct iwl_scan_channel_params_v6 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */
+
+/**
+ * struct iwl_scan_req_umac_v12
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v12 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v12 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
+
+/**
+ * struct iwl_scan_req_umac_v15
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v15 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v15 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */
+
+/**
+ * struct iwl_umac_scan_abort
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwl_umac_scan_abort {
+ __le32 uid;
+ __le32 flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_complete
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: &enum iwl_scan_offload_complete_status
+ * @ebs_status: &enum iwl_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwl_umac_scan_complete {
+ __le32 uid;
+ u8 last_schedule;
+ u8 last_iter;
+ u8 status;
+ u8 ebs_status;
+ __le32 time_from_last_iter;
+ __le32 reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7
+
+/**
+ * struct iwl_scan_offload_profile_match_v1 - match information
+ * @bssid: matched bssid
+ * @reserved: reserved
+ * @channel: channel where the match occurred
+ * @energy: energy
+ * @matching_feature: feature matches
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request.
+ */
+struct iwl_scan_offload_profile_match_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query_v1 - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query_v1 {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match_v1 matches[];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
+/**
+ * struct iwl_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @reserved: reserved
+ * @channel: channel where the match occurred
+ * @energy: energy
+ * @matching_feature: feature matches
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request.
+ */
+struct iwl_scan_offload_profile_match {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
+
+/**
+ * struct iwl_scan_offload_match_info - match results information
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_match_info {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match matches[];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and
+ * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1
+ */
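+
+/*
+ * Illustrative sketch only: checking whether profile @p matched, and whether
+ * channel index @ch (as ordered in the scan offload request) is set in the
+ * per-match channel bitmap.  The helper names are hypothetical.
+ */
+static inline bool
+iwl_example_profile_matched(const struct iwl_scan_offload_match_info *info, int p)
+{
+	return le32_to_cpu(info->matched_profiles) & BIT(p);
+}
+
+static inline bool
+iwl_example_channel_matched(const struct iwl_scan_offload_profile_match *m, int ch)
+{
+	return m->matching_channels[ch / 8] & BIT(ch % 8);
+}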
+
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ * in &struct iwl_scan_req_umac.
+ * @results: array of scan results, length in @scanned_channels
+ */
+struct iwl_umac_scan_iter_complete_notif {
+ __le32 uid;
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le64 start_tsf;
+ struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
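+
+/*
+ * Illustrative sketch only: walking the variable-length results array of the
+ * iteration-complete notification; @handle stands in for a hypothetical
+ * consumer of each result.
+ */
+static inline void
+iwl_example_walk_iter_results(const struct iwl_umac_scan_iter_complete_notif *notif,
+			      void (*handle)(const struct iwl_scan_results_notif *))
+{
+	int i;
+
+	for (i = 0; i < notif->scanned_channels; i++)
+		handle(&notif->results[i]);
+}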
+
+#endif /* __iwl_fw_api_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h
new file mode 100644
index 000000000..be4ff79fa
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_sf_h__
+#define __iwl_fw_api_sf_h__
+
+/* Smart Fifo state */
+enum iwl_sf_state {
+ SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+ SF_FULL_ON,
+ SF_UNINIT,
+ SF_INIT_OFF,
+ SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+ SF_SCENARIO_SINGLE_UNICAST,
+ SF_SCENARIO_AGG_UNICAST,
+ SF_SCENARIO_MULTICAST,
+ SF_SCENARIO_BA_RESP,
+ SF_SCENARIO_TX_RESP,
+ SF_NUM_SCENARIO
+};
+
+#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 6144
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
+/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
+
+/**
+ * struct iwl_sf_cfg_cmd - Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in &enum iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwl_sf_cfg_cmd {
+ __le32 state;
+ __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+ __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+ __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
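+
+/*
+ * Illustrative sketch only: programming the single-unicast scenario of a
+ * full-on Smart Fifo configuration with the BSS MAC timer values defined
+ * above.  The [aging, idle] index order is assumed from the
+ * SF_NUM_TIMEOUT_TYPES comment; the helper name is hypothetical.
+ */
+static inline void iwl_example_sf_fill_single_unicast(struct iwl_sf_cfg_cmd *cmd)
+{
+	cmd->state = cpu_to_le32(SF_FULL_ON);
+	cmd->full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][0] =
+		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER);
+	cmd->full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][1] =
+		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER);
+}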
+
+#endif /* __iwl_fw_api_sf_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
new file mode 100644
index 000000000..d62fed543
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_sta_h__
+#define __iwl_fw_api_sta_h__
+
+/**
+ * enum iwl_sta_flags - flags for the ADD_STA host command
+ * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames)
+ * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames)
+ * @STA_FLG_DISABLE_TX: set if TX should be disabled
+ * @STA_FLG_PS: set if STA is in Power Save
+ * @STA_FLG_DRAIN_FLOW: drain flow
+ * @STA_FLG_PAN: STA is for PAN interface
+ * @STA_FLG_CLASS_AUTH: station is authenticated
+ * @STA_FLG_CLASS_ASSOC: station is associated
+ * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS)
+ * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask)
+ * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift)
+ * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported)
+ * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported)
+ * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported)
+ * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported)
+ * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported)
+ * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported)
+ * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported)
+ * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported)
+ * @STA_FLG_MAX_AGG_SIZE_2M: maximal size for A-MPDU (2M supported)
+ * @STA_FLG_MAX_AGG_SIZE_4M: maximal size for A-MPDU (4M supported)
+ * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz
+ * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported
+ * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported
+ * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported
+ * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @STA_FLG_MIMO_EN_SISO: no support for MIMO
+ * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported
+ * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported
+ * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask)
+ * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift)
+ * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap)
+ */
+enum iwl_sta_flags {
+ STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
+ STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
+
+ STA_FLG_DISABLE_TX = BIT(4),
+
+ STA_FLG_PS = BIT(8),
+ STA_FLG_DRAIN_FLOW = BIT(12),
+ STA_FLG_PAN = BIT(13),
+ STA_FLG_CLASS_AUTH = BIT(14),
+ STA_FLG_CLASS_ASSOC = BIT(15),
+ STA_FLG_RTS_MIMO_PROT = BIT(17),
+
+ STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
+ STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_2M = (8 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_4M = (9 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ STA_FLG_MAX_AGG_SIZE_MSK = (0xf << STA_FLG_MAX_AGG_SIZE_SHIFT),
+
+ STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
+ STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+ STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+
+ STA_FLG_FAT_EN_20MHZ = (0 << 26),
+ STA_FLG_FAT_EN_40MHZ = (1 << 26),
+ STA_FLG_FAT_EN_80MHZ = (2 << 26),
+ STA_FLG_FAT_EN_160MHZ = (3 << 26),
+ STA_FLG_FAT_EN_MSK = (3 << 26),
+
+ STA_FLG_MIMO_EN_SISO = (0 << 28),
+ STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
+ STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
+ STA_FLG_MIMO_EN_MSK = (3 << 28),
+};
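+
+/*
+ * Illustrative sketch only: composing a station_flags value for a 40 MHz,
+ * two-stream peer that supports 64k A-MPDUs with 4 usec MPDU density, using
+ * the values defined above.  The helper name is hypothetical.
+ */
+static inline u32 iwl_example_sta_flags_ht40_mimo2(void)
+{
+	return STA_FLG_FAT_EN_40MHZ |
+	       STA_FLG_MIMO_EN_MIMO2 |
+	       STA_FLG_MAX_AGG_SIZE_64K |
+	       STA_FLG_AGG_MPDU_DENS_4US;
+}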
+
+/**
+ * enum iwl_sta_key_flag - key flags for the ADD_STA host command
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_GCMP: GCMP encryption algorithm
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ *	station info array (1 - per-station key, 802.1X mode)
+ * @STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @STA_KEY_FLG_KEYID_POS: key index bit position
+ * @STA_KEY_NOT_VALID: key is invalid
+ * @STA_KEY_FLG_WEP_13BYTES: set for a 13-byte WEP key
+ * @STA_KEY_FLG_KEY_32BYTES: set for a 32-byte key (non-WEP ciphers)
+ * @STA_KEY_MULTICAST: set for multicast key
+ * @STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwl_sta_key_flag {
+ STA_KEY_FLG_NO_ENC = (0 << 0),
+ STA_KEY_FLG_WEP = (1 << 0),
+ STA_KEY_FLG_CCM = (2 << 0),
+ STA_KEY_FLG_TKIP = (3 << 0),
+ STA_KEY_FLG_EXT = (4 << 0),
+ STA_KEY_FLG_GCMP = (5 << 0),
+ STA_KEY_FLG_CMAC = (6 << 0),
+ STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
+ STA_KEY_FLG_EN_MSK = (7 << 0),
+
+ STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_KEYID_POS = 8,
+ STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
+ STA_KEY_NOT_VALID = BIT(11),
+ STA_KEY_FLG_WEP_13BYTES = BIT(12),
+ STA_KEY_FLG_KEY_32BYTES = BIT(12),
+ STA_KEY_MULTICAST = BIT(14),
+ STA_KEY_MFP = BIT(15),
+};
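+
+/*
+ * Illustrative sketch only: building the key_flags field for a CCMP pairwise
+ * key installed at index @keyidx, using the values defined above.  The
+ * helper name is hypothetical.
+ */
+static inline __le16 iwl_example_ccmp_key_flags(u8 keyidx)
+{
+	u16 flags = STA_KEY_FLG_CCM |
+		    ((keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK);
+
+	return cpu_to_le16(flags);
+}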
+
+/**
+ * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
+ * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
+ * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
+ * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @STA_MODIFY_PROT_TH: modify RTS threshold
+ * @STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwl_sta_modify_flag {
+ STA_MODIFY_QUEUE_REMOVAL = BIT(0),
+ STA_MODIFY_TID_DISABLE_TX = BIT(1),
+ STA_MODIFY_UAPSD_ACS = BIT(2),
+ STA_MODIFY_ADD_BA_TID = BIT(3),
+ STA_MODIFY_REMOVE_BA_TID = BIT(4),
+ STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
+ STA_MODIFY_PROT_TH = BIT(6),
+ STA_MODIFY_QUEUES = BIT(7),
+};
+
+/**
+ * enum iwl_sta_mode - station command mode
+ * @STA_MODE_ADD: add new station
+ * @STA_MODE_MODIFY: modify the station
+ */
+enum iwl_sta_mode {
+ STA_MODE_ADD = 0,
+ STA_MODE_MODIFY = 1,
+};
+
+/**
+ * enum iwl_sta_sleep_flag - type of sleep of the station
+ * @STA_SLEEP_STATE_AWAKE: station is awake
+ * @STA_SLEEP_STATE_PS_POLL: station is PS-polling
+ * @STA_SLEEP_STATE_UAPSD: station uses U-APSD
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
+ */
+enum iwl_sta_sleep_flag {
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_MOREDATA = BIT(2),
+};
+
+#define STA_KEY_MAX_NUM (16)
+#define STA_KEY_IDX_INVALID (0xff)
+#define STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWL_MAX_GLOBAL_KEYS (4)
+#define STA_KEY_LEN_WEP40 (5)
+#define STA_KEY_LEN_WEP104 (13)
+
+#define IWL_ADD_STA_STATUS_MASK 0xFF
+#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWL_ADD_STA_BAID_MASK 0x7F00
+#define IWL_ADD_STA_BAID_SHIFT 8
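+
+/*
+ * Illustrative sketch only: splitting the ADD_STA response status word into
+ * the status code and (when valid) the block-ack ID, using the masks defined
+ * above.  The helper names are hypothetical.
+ */
+static inline u32 iwl_example_add_sta_status(u32 status)
+{
+	return status & IWL_ADD_STA_STATUS_MASK;
+}
+
+static inline int iwl_example_add_sta_baid(u32 status)
+{
+	if (!(status & IWL_ADD_STA_BAID_VALID_MASK))
+		return -1;
+
+	return (status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT;
+}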
+
+/**
+ * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd_v7 {
+ u8 add_modify;
+ u8 awake_acs;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+
+/**
+ * enum iwl_sta_type - FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWL_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWL_STA_MULTICAST: multicast traffic
+ * @IWL_STA_TDLS_LINK: TDLS link station
+ * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+enum iwl_sta_type {
+ IWL_STA_LINK,
+ IWL_STA_GENERAL_PURPOSE,
+ IWL_STA_MULTICAST,
+ IWL_STA_TDLS_LINK,
+ IWL_STA_AUX_ACTIVITY,
+};
+
+/**
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ * see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ * also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obsolete for new TX API (9 and above).
+ * @rx_ba_window: aggregation window size
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd {
+ u8 add_modify;
+ u8 awake_acs;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color; /* can be used for lmac id when using cmd v12 */
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ u8 sleep_state_flags;
+ u8 station_type;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+ __le16 rx_ba_window;
+ u8 sp_length;
+ u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
+
+/**
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type &enum iwl_sta_key_flag
+ * @key: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ */
+struct iwl_mvm_add_sta_key_common {
+ u8 sta_id;
+ u8 key_offset;
+ __le16 key_flags;
+ u8 key[32];
+ u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @reserved: reserved
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+ struct iwl_mvm_add_sta_key_common common;
+ u8 tkip_rx_tsc_byte2;
+ u8 reserved;
+ __le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ *
+ * Note: This is used for both v2 and v3, the difference being
+ * in the way the common.rx_secur_seq_cnt is used, in v2 that's
+ * the strange hole format, in v3 it's just a u64.
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ struct iwl_mvm_add_sta_key_common common;
+ __le64 rx_mic_key;
+ __le64 tx_mic_key;
+ __le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2, ADD_MODIFY_STA_KEY_API_S_VER_3 */
+
+/**
+ * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @ADD_STA_SUCCESS: operation was executed successfully
+ * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
+ * doesn't exist.
+ */
+enum iwl_mvm_add_sta_rsp_status {
+ ADD_STA_SUCCESS = 0x1,
+ ADD_STA_STATIONS_OVERLOAD = 0x2,
+ ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
+ ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
+};
+
+/**
+ * struct iwl_mvm_rm_sta_cmd - Remove a station from the fw's station table
+ * ( REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ * @reserved: reserved
+ */
+struct iwl_mvm_rm_sta_cmd {
+ u8 sta_id;
+ u8 reserved[3];
+} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: &enum iwl_sta_key_flag
+ * @igtk: IGTK key material
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that supports IGTK
+ * @key_id: key ID
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+ __le32 ctrl_flags;
+ u8 igtk[16];
+ u8 k1[16];
+ u8 k2[16];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: &enum iwl_sta_key_flag
+ * @igtk: IGTK master key
+ * @sta_id: station ID that supports IGTK
+ * @key_id: key ID
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd {
+ __le32 ctrl_flags;
+ u8 igtk[32];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
+
+struct iwl_mvm_wep_key {
+ u8 key_index;
+ u8 key_offset;
+ __le16 reserved1;
+ u8 key_size;
+ u8 reserved2[3];
+ u8 key[16];
+} __packed;
+
+struct iwl_mvm_wep_key_cmd {
+ __le32 mac_id_n_color;
+ u8 num_keys;
+ u8 decryption_type;
+ u8 flags;
+ u8 reserved;
+ struct iwl_mvm_wep_key wep_key[];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ * short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+ __le32 remain_frame_count;
+ __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
new file mode 100644
index 000000000..898e62326
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -0,0 +1,961 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_stats_h__
+#define __iwl_fw_api_stats_h__
+#include "mac.h"
+
+struct mvm_statistics_dbg {
+ __le32 burst_check;
+ __le32 burst_count;
+ __le32 wait_for_silence_timeout_cnt;
+ u8 reserved[12];
+} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
+
+struct mvm_statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+ __le32 rssi_ant;
+ __le32 reserved2;
+} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+/**
+ * struct mvm_statistics_rx_non_phy
+ * @bogus_cts: CTS received when not expecting CTS
+ * @bogus_ack: ACK received when not expecting ACK
+ * @non_channel_beacons: beacons with our bss id but not on our serving channel
+ * @channel_beacons: beacons with our bss id and in our serving channel
+ * @num_missed_bcon: number of missed beacons
+ * @adc_rx_saturation_time: count in 0.8us units the time the ADC was in
+ * saturation
+ * @ina_detection_search_time: total time (in 0.8us) searched for INA
+ * @beacon_silence_rssi_a: RSSI silence after beacon frame
+ * @beacon_silence_rssi_b: RSSI silence after beacon frame
+ * @beacon_silence_rssi_c: RSSI silence after beacon frame
+ * @interference_data_flag: flag for interference data availability. 1 when data
+ * is available.
+ * @channel_load: counts RX Enable time in uSec
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @beacon_rssi_c: beacon RSSI on antenna C
+ * @beacon_energy_a: beacon energy on antenna A
+ * @beacon_energy_b: beacon energy on antenna B
+ * @beacon_energy_c: beacon energy on antenna C
+ * @num_bt_kills: number of BT "kills" (frame TX aborts)
+ * @mac_id: mac ID
+ */
+struct mvm_statistics_rx_non_phy {
+ __le32 bogus_cts;
+ __le32 bogus_ack;
+ __le32 non_channel_beacons;
+ __le32 channel_beacons;
+ __le32 num_missed_bcon;
+ __le32 adc_rx_saturation_time;
+ __le32 ina_detection_search_time;
+ __le32 beacon_silence_rssi_a;
+ __le32 beacon_silence_rssi_b;
+ __le32 beacon_silence_rssi_c;
+ __le32 interference_data_flag;
+ __le32 channel_load;
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+ __le32 num_bt_kills;
+ __le32 mac_id;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */
+
+struct mvm_statistics_rx_non_phy_v3 {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ __le32 channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ __le32 num_missed_bcon; /* number of missed beacons */
+ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ __le32 interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+ __le32 num_bt_kills;
+ __le32 mac_id;
+ __le32 directed_data_mpdu;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy {
+ __le32 unresponded_rts;
+ __le32 rxe_frame_lmt_overrun;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy_v2 {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_lmt_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 mh_format_err;
+ __le32 re_acq_main_rssi_sum;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_rx_ht_phy_v1 {
+ __le32 plcp_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 crc32_err;
+ __le32 mh_format_err;
+ __le32 agg_crc32_good;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+struct mvm_statistics_rx_ht_phy {
+ __le32 mh_format_err;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_tx_non_phy_v3 {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_tx_non_phy {
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */
+
+#define MAX_CHAINS 3
+
+struct mvm_statistics_tx_non_phy_agg {
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 frame_not_ready;
+ __le32 underrun;
+ __le32 bt_prio_kill;
+ __le32 rx_ba_rsp_cnt;
+ __s8 txpower[MAX_CHAINS];
+ __s8 reserved;
+ __le32 reserved2;
+} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct mvm_statistics_tx_channel_width {
+ __le32 ext_cca_narrow_ch20[1];
+ __le32 ext_cca_narrow_ch40[2];
+ __le32 ext_cca_narrow_ch80[3];
+ __le32 ext_cca_narrow_ch160[4];
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct mvm_statistics_tx_v4 {
+ struct mvm_statistics_tx_non_phy_v3 general;
+ struct mvm_statistics_tx_non_phy_agg agg;
+ struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_4 */
+
+struct mvm_statistics_tx {
+ struct mvm_statistics_tx_non_phy general;
+ struct mvm_statistics_tx_non_phy_agg agg;
+ struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_5 */
+
+
+struct mvm_statistics_bt_activity {
+ __le32 hi_priority_tx_req_cnt;
+ __le32 hi_priority_tx_denied_cnt;
+ __le32 lo_priority_tx_req_cnt;
+ __le32 lo_priority_tx_denied_cnt;
+ __le32 hi_priority_rx_req_cnt;
+ __le32 hi_priority_rx_denied_cnt;
+ __le32 lo_priority_rx_req_cnt;
+ __le32 lo_priority_rx_denied_cnt;
+} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct mvm_statistics_general_common_v19 {
+ __le32 radio_temperature;
+ __le32 radio_voltage;
+ struct mvm_statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct mvm_statistics_div slow_div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+ __le32 beacon_filtered;
+ __le32 missed_beacons;
+ u8 beacon_filter_average_energy;
+ u8 beacon_filter_reason;
+ u8 beacon_filter_current_energy;
+ u8 beacon_filter_reserved;
+ __le32 beacon_filter_delta_time;
+ struct mvm_statistics_bt_activity bt_activity;
+ __le64 rx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le64 tx_time;
+} __packed;
+
+struct mvm_statistics_general_common {
+ __le32 radio_temperature;
+ struct mvm_statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct mvm_statistics_div slow_div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+ __le32 beacon_filtered;
+ __le32 missed_beacons;
+ u8 beacon_filter_average_energy;
+ u8 beacon_filter_reason;
+ u8 beacon_filter_current_energy;
+ u8 beacon_filter_reserved;
+ __le32 beacon_filter_delta_time;
+ struct mvm_statistics_bt_activity bt_activity;
+ __le64 rx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le64 tx_time;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct mvm_statistics_general_v8 {
+ struct mvm_statistics_general_common_v19 common;
+ __le32 beacon_counter[NUM_MAC_INDEX];
+ u8 beacon_average_energy[NUM_MAC_INDEX];
+ u8 reserved[4 - (NUM_MAC_INDEX % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+
+struct mvm_statistics_general {
+ struct mvm_statistics_general_common common;
+ __le32 beacon_counter[MAC_INDEX_AUX];
+ u8 beacon_average_energy[MAC_INDEX_AUX];
+ u8 reserved[8 - MAC_INDEX_AUX];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+/**
+ * struct mvm_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct mvm_statistics_load {
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 byte_count[MAC_INDEX_AUX];
+ __le32 pkt_count[MAC_INDEX_AUX];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
+
+struct mvm_statistics_load_v1 {
+ __le32 air_time[NUM_MAC_INDEX];
+ __le32 byte_count[NUM_MAC_INDEX];
+ __le32 pkt_count[NUM_MAC_INDEX];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
+
+struct mvm_statistics_rx {
+ struct mvm_statistics_rx_phy ofdm;
+ struct mvm_statistics_rx_phy cck;
+ struct mvm_statistics_rx_non_phy general;
+ struct mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_4 */
+
+struct mvm_statistics_rx_v3 {
+ struct mvm_statistics_rx_phy_v2 ofdm;
+ struct mvm_statistics_rx_phy_v2 cck;
+ struct mvm_statistics_rx_non_phy_v3 general;
+ struct mvm_statistics_rx_ht_phy_v1 ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * STATISTICS_CMD (0x9c), below.
+ */
+
+struct iwl_notif_statistics_v10 {
+ __le32 flag;
+ struct mvm_statistics_rx_v3 rx;
+ struct mvm_statistics_tx_v4 tx;
+ struct mvm_statistics_general_v8 general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+
+struct iwl_notif_statistics_v11 {
+ __le32 flag;
+ struct mvm_statistics_rx_v3 rx;
+ struct mvm_statistics_tx_v4 tx;
+ struct mvm_statistics_general_v8 general;
+ struct mvm_statistics_load_v1 load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general general;
+ struct mvm_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
+
+/**
+ * enum iwl_statistics_notif_flags - flags used in statistics notification
+ * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+enum iwl_statistics_notif_flags {
+ IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1,
+};
+
+/**
+ * enum iwl_statistics_cmd_flags - flags used in statistics command
+ * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ * that's sent after this command
+ * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ * notifications
+ */
+enum iwl_statistics_cmd_flags {
+ IWL_STATISTICS_FLG_CLEAR = 0x1,
+ IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2,
+};
+
+/**
+ * struct iwl_statistics_cmd - statistics config command
+ * @flags: flags from &enum iwl_statistics_cmd_flags
+ */
+struct iwl_statistics_cmd {
+ __le32 flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
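+
+/*
+ * Illustrative sketch only: a statistics command body asking the firmware to
+ * clear its counters after the report that follows this command.  The helper
+ * name is hypothetical.
+ */
+static inline struct iwl_statistics_cmd iwl_example_stats_clear_cmd(void)
+{
+	struct iwl_statistics_cmd cmd = {
+		.flags = cpu_to_le32(IWL_STATISTICS_FLG_CLEAR),
+	};
+
+	return cmd;
+}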
+
+#define MAX_BCAST_FILTER_NUM 8
+
+/**
+ * enum iwl_fw_statistics_type
+ *
+ * @FW_STATISTICS_OPERATIONAL: operational statistics
+ * @FW_STATISTICS_PHY: phy statistics
+ * @FW_STATISTICS_MAC: mac statistics
+ * @FW_STATISTICS_RX: rx statistics
+ * @FW_STATISTICS_TX: tx statistics
+ * @FW_STATISTICS_DURATION: duration statistics
+ * @FW_STATISTICS_HE: he statistics
+ */
+enum iwl_fw_statistics_type {
+ FW_STATISTICS_OPERATIONAL,
+ FW_STATISTICS_PHY,
+ FW_STATISTICS_MAC,
+ FW_STATISTICS_RX,
+ FW_STATISTICS_TX,
+ FW_STATISTICS_DURATION,
+ FW_STATISTICS_HE,
+}; /* FW_STATISTICS_TYPE_API_E_VER_1 */
+
+#define IWL_STATISTICS_TYPE_MSK 0x7f
+/**
+ * struct iwl_statistics_ntfy_hdr
+ *
+ * @type: struct type
+ * @version: version of the struct
+ * @size: size in bytes
+ */
+struct iwl_statistics_ntfy_hdr {
+ u8 type;
+ u8 version;
+ __le16 size;
+}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
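+
+/*
+ * Illustrative sketch only: checking which statistics structure a part's
+ * header announces, using the type mask defined above.  The helper name is
+ * hypothetical.
+ */
+static inline bool
+iwl_example_stats_part_is(const struct iwl_statistics_ntfy_hdr *hdr,
+			  enum iwl_fw_statistics_type type)
+{
+	return (hdr->type & IWL_STATISTICS_TYPE_MSK) == type;
+}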
+
+/**
+ * struct iwl_statistics_ntfy_per_mac
+ *
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: all beacons (both filtered and not
+ * filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: RX byte count
+ */
+struct iwl_statistics_ntfy_per_mac {
+ __le32 beacon_filter_average_energy;
+ __le32 air_time;
+ __le32 beacon_counter;
+ __le32 beacon_average_energy;
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes;
+} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */
+
+#define IWL_STATS_MAX_BW_INDEX 5
+/**
+ * struct iwl_statistics_ntfy_per_phy
+ *
+ * @channel_load: channel load
+ * @channel_load_by_us: device contribution to MCLM
+ * @channel_load_not_by_us: other devices' contribution to MCLM
+ * @clt: CLT HW timer (TIM_CH_LOAD2)
+ * @act: active accumulator SW
+ * @elp: elapsed time accumulator SW
+ * @rx_detected_per_ch_width: number of deferred TX per channel width,
+ * 0 - 20, 1/2/3 - 40/80/160
+ * @success_per_ch_width: number of frames that got ACK/BACK/CTS
+ * per channel BW. note, BACK counted as 1
+ * @fail_per_ch_width: number of frames that didn't get ACK/BACK/CTS
+ * per channel BW. note BACK counted as 1
+ * @last_tx_ch_width_indx: last txed frame channel width index
+ */
+struct iwl_statistics_ntfy_per_phy {
+ __le32 channel_load;
+ __le32 channel_load_by_us;
+ __le32 channel_load_not_by_us;
+ __le32 clt;
+ __le32 act;
+ __le32 elp;
+ __le32 rx_detected_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 success_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 fail_per_ch_width[IWL_STATS_MAX_BW_INDEX];
+ __le32 last_tx_ch_width_indx;
+} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_ntfy_per_sta
+ *
+ * @average_energy: in fact it is minus the energy..
+ */
+struct iwl_statistics_ntfy_per_sta {
+ __le32 average_energy;
+} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
+
+#define IWL_STATS_MAX_PHY_OPERTINAL 3
+/**
+ * struct iwl_statistics_operational_ntfy
+ *
+ * @hdr: general statistics header
+ * @flags: bitmap of possible notification structures
+ * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac
+ * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy
+ * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta
+ * @rx_time: rx time
+ * @tx_time: usec the radio is transmitting.
+ * @on_time_rf: The total time in usec the RF is awake.
+ * @on_time_scan: usec the radio is awake due to scan.
+ */
+struct iwl_statistics_operational_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 flags;
+ struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX];
+ struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL];
+ struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX];
+ __le64 rx_time;
+ __le64 tx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */
+
+/**
+ * struct iwl_statistics_operational_ntfy_ver_14
+ *
+ * @hdr: general statistics header
+ * @flags: bitmap of possible notification structures
+ * @mac_id: mac on which the beacon was received
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @beacon_filter_reason: beacon filter reason
+ * @radio_temperature: radio temperature
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: all beacons (both filtered and not
+ * filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: per MAC RX byte count
+ * @rx_time: rx time
+ * @tx_time: usec the radio is transmitting.
+ * @on_time_rf: The total time in usec the RF is awake.
+ * @on_time_scan: usec the radio is awake due to scan.
+ * @average_energy: per-station average energy, reported negated (i.e. minus the energy)
+ * @reserved: reserved
+ */
+struct iwl_statistics_operational_ntfy_ver_14 {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 flags;
+ __le32 mac_id;
+ __le32 beacon_filter_average_energy;
+ __le32 beacon_filter_reason;
+ __le32 radio_temperature;
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 beacon_counter[MAC_INDEX_AUX];
+ __le32 beacon_average_energy[MAC_INDEX_AUX];
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes[MAC_INDEX_AUX];
+ __le64 rx_time;
+ __le64 tx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le32 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 reserved;
+} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */
+
+/**
+ * struct iwl_statistics_phy_ntfy
+ *
+ * @hdr: general statistics header
+ * RX PHY related statistics
+ * @energy_and_config: ???
+ * @rssi_band: @31:24 rssiAllBand_B, 23:16 rssiInBand_B, 15:8
+ * rssiAllBand_A, 7:0 rssiInBand_A
+ * @agc_word: @31:16 agcWord_B, 15:0 agcWord_A
+ * @agc_gain: @19:10 agcGain_B, 9:0 agcGain_A
+ * @dfe_gain: @19:10 dfeGain_B, 9:0 dfeGain_A
+ * @snr_calc_main: @18:0 snrCalcMain
+ * @energy_calc_main: @18:0 energyCalcMain
+ * @snr_calc_aux: @18:0 snrCalcAux
+ * @dsp_dc_estim_a: @27:14 dspDcEstimQA, 13:0 dspDcEstimIA
+ * @dsp_dc_estim_b: @27:14 dspDcEstimQB, 13:0 dspDcEstimIB
+ * @ina_detec_type_and_ofdm_corr_comb: @31:31 inaDetectCckMrc,
+ * 30:27 inaDetectType, 26:0 ofdmCorrComb
+ * @cw_corr_comb: @26:0 cwCorrComb
+ * @rssi_comb: @25:0 rssiComb
+ * @auto_corr_cck: @23:12 autoCck, 11:00 crossCck
+ * @ofdm_fine_freq_and_pina_freq_err: @18:7 ofdmFineFreq, 6:0
+ * ofdmPinaFreqErr
+ * @snrm_evm_main: @31:0 snrmEvmMain
+ * @snrm_evm_aux: @31:0 snrmEvmAux
+ * @rx_rate: @31:0 rate
+ * TX PHY related statistics
+ * @per_chain_enums_and_dsp_atten_a: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_a: @31:16 targetPower_A, 15:0
+ * powerMeasuredCalc_A
+ * @tx_config_as_i_and_ac_a: @31:16 txConfigAsI_A, 15:0
+ * txConfigAc_A
+ * @predist_dcq_and_dci_a: @31:16 predist_dci_A, 15:0
+ * predist_dcq_A
+ * @per_chain_enums_and_dsp_atten_b: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_b: @31:16 targetPower_B, 15:0
+ * powerMeasuredCalc_B
+ * @tx_config_as_i_and_ac_b: @31:16 txConfigAsI_B, 15:0
+ * txConfigAc_B
+ * @predist_dcq_and_dci_b: @31:16 predist_dci_B, 15:0
+ * predist_dcq_B
+ * @tx_rate: @31:0 rate
+ * @tlc_backoff: @31:0 tlcBackoff
+ * @mpapd_calib_mode_mpapd_calib_type_a: @31:16
+ * mpapdCalibMode_A, 15:0 mpapdCalibType_A
+ * @psat_and_phy_power_limit_a: @31:16 psat_A, 15:0
+ * phyPowerLimit_A
+ * @sar_and_regulatory_power_limit_a: @31:16 sarPowerLimit_A,
+ * 15:0 regulatoryPowerLimit_A
+ * @mpapd_calib_mode_mpapd_calib_type_b: @31:16
+ * mpapdCalibMode_B, 15:0 mpapdCalibType_B
+ * @psat_and_phy_power_limit_b: @31:16 psat_B, 15:0
+ * phyPowerLimit_B
+ * @sar_and_regulatory_power_limit_b: @31:16 sarPowerLimit_B,
+ * 15:0 regulatoryPowerLimit_B
+ * @srd_and_driver_power_limits: @31:16 srdPowerLimit, 15:0
+ * driverPowerLimit
+ * @reserved: reserved
+ */
+struct iwl_statistics_phy_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 energy_and_config;
+ __le32 rssi_band;
+ __le32 agc_word;
+ __le32 agc_gain;
+ __le32 dfe_gain;
+ __le32 snr_calc_main;
+ __le32 energy_calc_main;
+ __le32 snr_calc_aux;
+ __le32 dsp_dc_estim_a;
+ __le32 dsp_dc_estim_b;
+ __le32 ina_detec_type_and_ofdm_corr_comb;
+ __le32 cw_corr_comb;
+ __le32 rssi_comb;
+ __le32 auto_corr_cck;
+ __le32 ofdm_fine_freq_and_pina_freq_err;
+ __le32 snrm_evm_main;
+ __le32 snrm_evm_aux;
+ __le32 rx_rate;
+ __le32 per_chain_enums_and_dsp_atten_a;
+ __le32 target_power_and_power_meas_a;
+ __le32 tx_config_as_i_and_ac_a;
+ __le32 predist_dcq_and_dci_a;
+ __le32 per_chain_enums_and_dsp_atten_b;
+ __le32 target_power_and_power_meas_b;
+ __le32 tx_config_as_i_and_ac_b;
+ __le32 predist_dcq_and_dci_b;
+ __le32 tx_rate;
+ __le32 tlc_backoff;
+ __le32 mpapd_calib_mode_mpapd_calib_type_a;
+ __le32 psat_and_phy_power_limit_a;
+ __le32 sar_and_regulatory_power_limit_a;
+ __le32 mpapd_calib_mode_mpapd_calib_type_b;
+ __le32 psat_and_phy_power_limit_b;
+ __le32 sar_and_regulatory_power_limit_b;
+ __le32 srd_and_driver_power_limits;
+ __le32 reserved;
+} __packed; /* STATISTICS_PHY_NTFY_API_S_VER_1 */
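(Editorial sketch, not part of the patch.) Several of the RX PHY words above pack two or four sub-fields into one __le32. A small illustrative decode of @rssi_band, following the bit layout documented in the comment; the helper name is local to this sketch, and u32_get_bits()/GENMASK() come from <linux/bitfield.h> and <linux/bits.h>.

static void iwl_unpack_rssi_band(__le32 rssi_band)
{
	u32 v = le32_to_cpu(rssi_band);
	u8 rssi_in_band_a  = u32_get_bits(v, GENMASK(7, 0));
	u8 rssi_all_band_a = u32_get_bits(v, GENMASK(15, 8));
	u8 rssi_in_band_b  = u32_get_bits(v, GENMASK(23, 16));
	u8 rssi_all_band_b = u32_get_bits(v, GENMASK(31, 24));

	pr_debug("A: in %u all %u, B: in %u all %u\n",
		 rssi_in_band_a, rssi_all_band_a,
		 rssi_in_band_b, rssi_all_band_b);
}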
+
+/**
+ * struct iwl_statistics_mac_ntfy
+ *
+ * @hdr: general statistics header
+ * @bcast_filter_passed_per_mac: bcast filter passed per mac
+ * @bcast_filter_dropped_per_mac: bcast filter dropped per mac
+ * @bcast_filter_passed_per_filter: bcast filter passed per filter
+ * @bcast_filter_dropped_per_filter: bcast filter dropped per filter
+ * @reserved: reserved
+ */
+struct iwl_statistics_mac_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 bcast_filter_passed_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_dropped_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_passed_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 bcast_filter_dropped_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 reserved;
+} __packed; /* STATISTICS_MAC_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_rx_ntfy
+ *
+ * @hdr: general statistics header
+ * @rx_agg_mpdu_cnt: aggregation frame count (number of
+ * delimiters)
+ * @rx_agg_cnt: number of RX Aggregations
+ * @unsupported_mcs: number of PLCP headers that have a rate which
+ * is unsupported by the DSP
+ * @bogus_cts: CTS received when not expecting CTS
+ * @bogus_ack: ACK received when not expecting ACK
+ * @rx_byte_count: ???
+ * @rx_packet_count: ???
+ * @missed_beacons: ???
+ * @unresponded_rts: un-responded RTS, due to NAV not zero
+ * @rxe_frame_limit_overrun: RXE got frame limit overrun
+ * @sent_ba_rsp_cnt: BA response TX count
+ * @late_rx_handle: count the number of times the RX path was
+ * aborted due to late entry
+ * @num_bt_kills: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_rx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_agg_mpdu_cnt;
+ __le32 rx_agg_cnt;
+ __le32 unsupported_mcs;
+ __le32 bogus_cts;
+ __le32 bogus_ack;
+ __le32 rx_byte_count[MAC_INDEX_AUX];
+ __le32 rx_packet_count[MAC_INDEX_AUX];
+ __le32 missed_beacons;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ba_rsp_cnt;
+ __le32 late_rx_handle;
+ __le32 num_bt_kills;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_tx_ntfy
+ *
+ * @hdr: general statistics header
+ * @cts_timeout: timeout when waiting for CTS
+ * @ack_timeout: timeout when waiting for ACK
+ * @dump_msdu_cnt: number of MSDUs that were dumped due to any
+ * reason
+ * @burst_abort_missing_next_frame_cnt: number of times a burst
+ * was aborted due to missing next frame bytes in txfifo
+ * number of times a timeout occurred while waiting for CTS/ACK/BA and energy
+ * was detected just after sending the RTS/DATA. These statistics may help
+ * derive interesting indicators, such as the likelihood of collision (so the
+ * benefit of protection may be estimated vs. its cost), or how many of the
+ * failures are due to collision and how many are due to SNR.
+ * For link quality the CTS collision indication is more reliable than the ACK
+ * collision indication, as the RTS frame is short and there is a higher chance
+ * that the frame(s) which caused the collision continue after the RTS was sent.
+ * @cts_timeout_collision: ???
+ * ACK/BA failed and energy was detected after DATA
+ * Note: to get the collision ratio compute
+ * ack_or_ba_timeout_collision / (ack_timeout + ba_timeout)
+ * @ack_or_ba_timeout_collision: ???
+ * @ba_timeout: timeout when waiting for immediate BA response
+ * @ba_reschedule_frames: failed to get BA response and
+ * rescheduled all the non-ACKed frames
+ * gives the average number of frames inside an aggregation
+ * @scd_query_agg_frame_cnt: ???
+ * @scd_query_no_agg: scheduler query prevented aggregation
+ * @scd_query_agg: scheduler query allowed aggregation
+ * @scd_query_mismatch: scheduler query inaccurate, either too
+ * short or too long
+ * @agg_terminated_underrun: aggregation was terminated due to
+ * underrun
+ * @agg_terminated_bt_prio_kill: aggregation was terminated due
+ * to BT
+ * @tx_kill_on_long_retry: count the tx frames dropped due to
+ * long retry limit (DATA frame failed)
+ * @tx_kill_on_short_retry: count the tx frames dropped due to
+ * short retry limit (RTS frame failed)
+ * TX defer on energy. This counter is reset on each successful transmit.
+ * When the timer exceeds the TX defer limit, the uCode will assert.
+ * @tx_deffer_counter: ???
+ * @tx_deffer_base_time: Keep the time of the last successful
+ * transmit
+ * @tx_underrun: TX killed due to underrun
+ * @bt_defer: TX deferred due to BT priority, so probably TX was
+ * not started.
+ * @tx_kill_on_dsp_timeout: TX killed on DSP problem detected
+ * @tx_kill_on_immediate_quiet: TX killed due to immediate quiet
+ * @kill_ba_cnt: number of times sending BA failed
+ * @kill_ack_cnt: number of times sending ACK failed
+ * @kill_cts_cnt: number of times sending CTS failed
+ * @burst_terminated: Count burst or fragmentation termination
+ * occurrence
+ * @late_tx_vec_wr_cnt: ???
+ * TX is not sent because ucode failed to notify the TRM in SIFS-delta from
+ * ON_AIR deassertion.
+ * @late_rx2_tx_cnt: ???
+ * @scd_query_cnt: count the times SCD query was done to check
+ * for TX AGG
+ * @tx_frames_acked_in_agg: count the number of frames
+ * transmitted inside AGG and were successful
+ * @last_tx_ch_width_indx: ???
+ * number of deferred TX per channel width, 0 - 20, 1/2/3 - 40/80/160
+ * @rx_detected_per_ch_width: ???
+ * @success_per_ch_width: ???
+ * @fail_per_ch_width: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_tx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 agg_terminated_underrun;
+ __le32 agg_terminated_bt_prio_kill;
+ __le32 tx_kill_on_long_retry;
+ __le32 tx_kill_on_short_retry;
+ __le32 tx_deffer_counter;
+ __le32 tx_deffer_base_time;
+ __le32 tx_underrun;
+ __le32 bt_defer;
+ __le32 tx_kill_on_dsp_timeout;
+ __le32 tx_kill_on_immediate_quiet;
+ __le32 kill_ba_cnt;
+ __le32 kill_ack_cnt;
+ __le32 kill_cts_cnt;
+ __le32 burst_terminated;
+ __le32 late_tx_vec_wr_cnt;
+ __le32 late_rx2_tx_cnt;
+ __le32 scd_query_cnt;
+ __le32 tx_frames_acked_in_agg;
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+ __le32 reserved;
+} __packed; /* STATISTICS_TX_NTFY_API_S_VER_1 */
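(Editorial sketch, not part of the patch.) The collision-ratio note in the comment above maps directly onto the fields of this struct; a minimal computation, expressed in percent to stay in integer arithmetic. The helper name is hypothetical.

static u32 iwl_tx_collision_ratio_pct(const struct iwl_statistics_tx_ntfy *tx)
{
	u32 timeouts = le32_to_cpu(tx->ack_timeout) +
		       le32_to_cpu(tx->ba_timeout);
	u32 collisions = le32_to_cpu(tx->ack_or_ba_timeout_collision);

	if (!timeouts)
		return 0;

	return collisions * 100 / timeouts;
}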
+
+/**
+ * struct iwl_statistics_duration_ntfy
+ *
+ * @hdr: general statistics header
+ * @cont_burst_chk_cnt: number of times continuation or
+ * fragmentation or bursting was checked
+ * @cont_burst_cnt: number of times continuation or fragmentation
+ * or bursting was successful
+ * @wait_for_silence_timeout_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_duration_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cont_burst_chk_cnt;
+ __le32 cont_burst_cnt;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_DURATION_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_he_ntfy
+ *
+ * @hdr: general statistics header
+ * received HE frames
+ * @rx_siga_valid_cnt: rx HE SIG-A valid
+ * @rx_siga_invalid_cnt: rx HE SIG-A invalid
+ * received HE frames w/ valid Sig-A
+ * @rx_trig_based_frame_cnt: rx HE-TB (trig-based)
+ * @rx_su_frame_cnt: rx HE-SU
+ * @rx_sigb_invalid_cnt: rx (suspected) HE-MU w/ bad SIG-B
+ * @rx_our_bss_color_cnt: rx valid HE SIG-A w/ our BSS color
+ * @rx_other_bss_color_cnt: rx valid HE SIG-A w/ other BSS color
+ * @rx_zero_bss_color_cnt: ???
+ * received HE-MU frames w/ good Sig-B
+ * @rx_mu_for_us_cnt: match AID
+ * @rx_mu_not_for_us_cnt: no matched AID
+ * received HE-MU frames for us (w/ our AID)
+ * @rx_mu_nss_ar: 0 - SISO, 1 - MIMO2
+ * @rx_mu_mimo_cnt: full BW RU, compressed SIG-B
+ * @rx_mu_ru_bw_ar: MU alloc, MHz: 0 - 2, 1 - 5, 2 - 10, 3 - 20,
+ * 4 - 40, 5 - 80, 6 - 160
+ * received trigger frames
+ * @rx_trig_for_us_cnt: ???
+ * @rx_trig_not_for_us_cnt: ???
+ * trigger for us
+ * @rx_trig_with_cs_req_cnt: ???
+ * @rx_trig_type_ar: ???
+ * @rx_trig_in_agg_cnt: ???
+ * basic trigger for us allocations
+ * @rx_basic_trig_alloc_nss_ar: ???
+ * @rx_basic_trig_alloc_mu_mimo_cnt: ???
+ * @rx_basic_trig_alloc_ru_bw_ar: ???
+ * @rx_basic_trig_total_byte_cnt: ???
+ * trig-based TX
+ * @tx_trig_based_cs_req_fail_cnt: ???
+ * @tx_trig_based_sifs_ok_cnt: ???
+ * @tx_trig_based_sifs_fail_cnt: ???
+ * @tx_trig_based_byte_cnt: ???
+ * @tx_trig_based_pad_byte_cnt: ???
+ * @tx_trig_based_frame_cnt: ???
+ * @tx_trig_based_acked_frame_cnt: ???
+ * @tx_trig_based_ack_timeout_cnt: ???
+ * HE-SU TX
+ * @tx_su_frame_cnt: ???
+ * EDCA <--> MU-EDCA transitions
+ * @tx_edca_to_mu_edca_cnt: ???
+ * @tx_mu_edca_to_edca_by_timeout_cnt: ???
+ * @tx_mu_edca_to_edca_by_ack_fail_cnt: ???
+ * @tx_mu_edca_to_edca_by_small_alloc_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_he_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_siga_valid_cnt;
+ __le32 rx_siga_invalid_cnt;
+ __le32 rx_trig_based_frame_cnt;
+ __le32 rx_su_frame_cnt;
+ __le32 rx_sigb_invalid_cnt;
+ __le32 rx_our_bss_color_cnt;
+ __le32 rx_other_bss_color_cnt;
+ __le32 rx_zero_bss_color_cnt;
+ __le32 rx_mu_for_us_cnt;
+ __le32 rx_mu_not_for_us_cnt;
+ __le32 rx_mu_nss_ar[2];
+ __le32 rx_mu_mimo_cnt;
+ __le32 rx_mu_ru_bw_ar[7];
+ __le32 rx_trig_for_us_cnt;
+ __le32 rx_trig_not_for_us_cnt;
+ __le32 rx_trig_with_cs_req_cnt;
+ __le32 rx_trig_type_ar[8 + 1];
+ __le32 rx_trig_in_agg_cnt;
+ __le32 rx_basic_trig_alloc_nss_ar[2];
+ __le32 rx_basic_trig_alloc_mu_mimo_cnt;
+ __le32 rx_basic_trig_alloc_ru_bw_ar[7];
+ __le32 rx_basic_trig_total_byte_cnt;
+ __le32 tx_trig_based_cs_req_fail_cnt;
+ __le32 tx_trig_based_sifs_ok_cnt;
+ __le32 tx_trig_based_sifs_fail_cnt;
+ __le32 tx_trig_based_byte_cnt;
+ __le32 tx_trig_based_pad_byte_cnt;
+ __le32 tx_trig_based_frame_cnt;
+ __le32 tx_trig_based_acked_frame_cnt;
+ __le32 tx_trig_based_ack_timeout_cnt;
+ __le32 tx_su_frame_cnt;
+ __le32 tx_edca_to_mu_edca_cnt;
+ __le32 tx_mu_edca_to_edca_by_timeout_cnt;
+ __le32 tx_mu_edca_to_edca_by_ack_fail_cnt;
+ __le32 tx_mu_edca_to_edca_by_small_alloc_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_HE_NTFY_API_S_VER_1 */
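(Editorial note.) The RU-bandwidth arrays above are indexed by the 0..6 code documented for @rx_mu_ru_bw_ar; an illustrative index-to-MHz lookup table, simply restating that mapping:

static const unsigned int iwl_he_ru_bw_mhz[7] = {
	2, 5, 10, 20, 40, 80, 160	/* index 0..6 as documented above */
};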
+
+#endif /* __iwl_fw_api_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/system.h b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h
new file mode 100644
index 000000000..acf5d4b9a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_system_h__
+#define __iwl_fw_api_system_h__
+
+#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
+#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
+
+#define SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
+/**
+ * struct iwl_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwl_soc_configuration_cmd {
+ __le32 flags;
+ __le32 latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
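(Editorial sketch, not part of the patch.) A minimal example of filling this command under the VER_2 semantics described above, where the flag bits and the LTR apply-delay code can be set independently. FIELD_PREP() is from <linux/bitfield.h>; the latency value is illustrative.

struct iwl_soc_configuration_cmd cmd = {
	.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY |
			     FIELD_PREP(SOC_FLAGS_LTR_APPLY_DELAY_MASK,
					SOC_FLAGS_LTR_APPLY_DELAY_200)),
	.latency = cpu_to_le32(200),	/* illustrative stabilization latency */
};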
+
+/**
+ * struct iwl_system_features_control_cmd - system features control command
+ * @features: bitmap of features to disable
+ */
+struct iwl_system_features_control_cmd {
+ __le32 features[4];
+} __packed; /* SYSTEM_FEATURES_CONTROL_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_system_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
new file mode 100644
index 000000000..893438aad
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_tdls_h__
+#define __iwl_fw_api_tdls_h__
+
+#include "fw/api/tx.h"
+#include "fw/api/phy-ctxt.h"
+
+#define IWL_MVM_TDLS_STA_COUNT 4
+
+/* Type of TDLS request */
+enum iwl_tdls_channel_switch_type {
+ TDLS_SEND_CHAN_SW_REQ = 0,
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
+ TDLS_MOVE_CH,
+}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch
+ * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
+ * received from peer
+ * @max_offchan_duration: The amount of microseconds out of a DTIM interval given
+ * to the TDLS off-channel communication. For instance if the DTIM is
+ * 200TU and the TDLS peer is to be given 25% of the time, the value
+ * given will be 50TU, or 50 * 1024 if translated into microseconds.
+ * @switch_time: switch time the peer sent in its channel switch timing IE
+ * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
+ */
+struct iwl_tdls_channel_switch_timing {
+ __le32 frame_timestamp; /* GP2 time of peer packet Rx */
+ __le32 max_offchan_duration; /* given in micro-seconds */
+ __le32 switch_time; /* given in micro-seconds */
+ __le32 switch_timeout; /* given in micro-seconds */
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
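(Editorial sketch, not part of the patch.) The @max_offchan_duration example above (25% of a 200TU DTIM -> 50TU -> 50 * 1024 usec) can be expressed as a small helper; the name and parameters are hypothetical.

static u32 iwl_tdls_offchan_usec(u32 dtim_tu, u32 percent)
{
	/* 1 TU = 1024 usec */
	return dtim_tu * percent / 100 * 1024;
}

/* iwl_tdls_offchan_usec(200, 25) == 50 * 1024, matching the example above */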
+
+#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
+
+/**
+ * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
+ *
+ * A template representing a TDLS channel-switch request or response frame
+ *
+ * @switch_time_offset: offset to the channel switch timing IE in the template
+ * @tx_cmd: Tx parameters for the frame
+ * @data: frame data
+ */
+struct iwl_tdls_channel_switch_frame {
+ __le32 switch_time_offset;
+ struct iwl_tx_cmd tx_cmd;
+ u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd
+ *
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd_tail {
+ struct iwl_tdls_channel_switch_timing timing;
+ struct iwl_tdls_channel_switch_frame frame;
+} __packed;
+
+/**
+ * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
+ *
+ * The command is sent to initiate a channel switch and also in response to
+ * incoming TDLS channel-switch request/response packets from remote peers.
+ *
+ * @switch_type: see &enum iwl_tdls_channel_switch_type
+ * @peer_sta_id: station id of TDLS peer
+ * @ci: channel we switch to
+ * @tail: command tail
+ */
+struct iwl_tdls_channel_switch_cmd {
+ u8 switch_type;
+ __le32 peer_sta_id;
+ struct iwl_fw_channel_info ci;
+ struct iwl_tdls_channel_switch_cmd_tail tail;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
+ *
+ * @status: non-zero on success
+ * @offchannel_duration: duration given in microseconds
+ * @sta_id: peer currently performing the channel-switch with
+ */
+struct iwl_tdls_channel_switch_notif {
+ __le32 status;
+ __le32 offchannel_duration;
+ __le32 sta_id;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_sta_info - TDLS station info
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
+ * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
+ * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
+ */
+struct iwl_tdls_sta_info {
+ u8 sta_id;
+ u8 tx_to_peer_tid;
+ __le16 tx_to_peer_ssn;
+ __le32 is_initiator;
+} __packed; /* TDLS_STA_INFO_VER_1 */
+
+/**
+ * struct iwl_tdls_config_cmd - TDLS basic config command
+ *
+ * @id_and_color: MAC id and color being configured
+ * @tdls_peer_count: amount of currently connected TDLS peers
+ * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
+ * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
+ * @sta_info: per-station info. Only the first tdls_peer_count entries are set
+ * @pti_req_data_offset: offset of network-level data for the PTI template
+ * @pti_req_tx_cmd: Tx parameters for PTI request template
+ * @pti_req_template: PTI request template data
+ */
+struct iwl_tdls_config_cmd {
+ __le32 id_and_color; /* mac id and color */
+ u8 tdls_peer_count;
+ u8 tx_to_ap_tid;
+ __le16 tx_to_ap_ssn;
+ struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+
+ __le32 pti_req_data_offset;
+ struct iwl_tx_cmd pti_req_tx_cmd;
+ u8 pti_req_template[];
+} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
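(Editorial sketch, not part of the patch.) Since @pti_req_template is a flexible array member, the command must be sized at runtime; a minimal allocation inside a hypothetical command-building function. struct_size() is from <linux/overflow.h>; peer_count, pti_template and pti_template_len are assumed local variables.

struct iwl_tdls_config_cmd *cmd;

cmd = kzalloc(struct_size(cmd, pti_req_template, pti_template_len),
	      GFP_KERNEL);
if (!cmd)
	return -ENOMEM;

cmd->tdls_peer_count = peer_count;	/* at most IWL_MVM_TDLS_STA_COUNT */
memcpy(cmd->pti_req_template, pti_template, pti_template_len);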
+
+/**
+ * struct iwl_tdls_config_sta_info_res - TDLS per-station config information
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
+ * the peer
+ */
+struct iwl_tdls_config_sta_info_res {
+ __le16 sta_id;
+ __le16 tx_to_peer_last_seq;
+} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
+
+/**
+ * struct iwl_tdls_config_res - TDLS config information from FW
+ *
+ * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
+ * @sta_info: per-station TDLS config information
+ */
+struct iwl_tdls_config_res {
+ __le32 tx_to_ap_last_seq;
+ struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_tdls_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
new file mode 100644
index 000000000..904cd78a9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_time_event_h__
+#define __iwl_fw_api_time_event_h__
+
+#include "fw/api/phy-ctxt.h"
+
+/* Time Event types, according to MAC type */
+enum iwl_time_event_type {
+ /* BSS Station Events */
+ TE_BSS_STA_AGGRESSIVE_ASSOC,
+ TE_BSS_STA_ASSOC,
+ TE_BSS_EAP_DHCP_PROT,
+ TE_BSS_QUIET_PERIOD,
+
+ /* P2P Device Events */
+ TE_P2P_DEVICE_DISCOVERABLE,
+ TE_P2P_DEVICE_LISTEN,
+ TE_P2P_DEVICE_ACTION_SCAN,
+ TE_P2P_DEVICE_FULL_SCAN,
+
+ /* P2P Client Events */
+ TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+ TE_P2P_CLIENT_ASSOC,
+ TE_P2P_CLIENT_QUIET_PERIOD,
+
+ /* P2P GO Events */
+ TE_P2P_GO_ASSOC_PROT,
+ TE_P2P_GO_REPETITIVET_NOA,
+ TE_P2P_GO_CT_WINDOW,
+
+ /* WiDi Sync Events */
+ TE_WIDI_TX_SYNC,
+
+ /* Channel Switch NoA */
+ TE_CHANNEL_SWITCH_PERIOD,
+
+ TE_MAX
+}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ TE_V1_FRAG_NONE = 0,
+ TE_V1_FRAG_SINGLE = 1,
+ TE_V1_FRAG_DUAL = 2,
+ TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_V1_FRAG_MAX_MSK 0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS 0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+ TE_V1_INDEPENDENT = 0,
+ TE_V1_DEP_OTHER = BIT(0),
+ TE_V1_DEP_TSF = BIT(1),
+ TE_V1_EVENT_SOCIOPATHIC = BIT(2),
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/*
+ * @TE_V1_NOTIF_NONE: no notifications
+ * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
+ * Supported Time event notifications configuration.
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ */
+enum {
+ TE_V1_NOTIF_NONE = 0,
+ TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+ TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
+}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+
+/* Time event - defines for command API */
+
+/*
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ TE_V2_FRAG_NONE = 0,
+ TE_V2_FRAG_SINGLE = 1,
+ TE_V2_FRAG_DUAL = 2,
+ TE_V2_FRAG_MAX = 0xfe,
+ TE_V2_FRAG_ENDLESS = 0xff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_V2_REPEAT_ENDLESS 0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V2_REPEAT_MAX 0xfe
+
+#define TE_V2_PLACEMENT_POS 12
+#define TE_V2_ABSENCE_POS 15
+
+/**
+ * enum iwl_time_event_policy - Time event policy values
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @TE_V2_START_IMMEDIATELY: start time event immediately
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum iwl_time_event_policy {
+ TE_V2_DEFAULT_POLICY = 0x0,
+
+ /* notifications (event start/stop, fragment start/stop) */
+ TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+ TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+ TE_V2_START_IMMEDIATELY = BIT(11),
+
+ /* placement characteristics */
+ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+ TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+ TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+ /* are we present or absent during the Time Event. */
+ TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
+ * version 1, determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC,
+ * &enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of &enum iwl_ctxt_action
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ * on event and/or fragment start and/or end
+ * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+ * TE_EVENT_SOCIOPATHIC
+ * using TE_ABSENCE and using TE_NOTIF_*,
+ * &enum iwl_time_event_policy
+ */
+struct iwl_time_event_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 id;
+ /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
+ __le32 apply_time;
+ __le32 max_delay;
+ __le32 depends_on;
+ __le32 interval;
+ __le32 duration;
+ u8 repeat;
+ u8 max_frags;
+ __le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
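(Editorial sketch, not part of the patch.) An example ADD command for a host-notified, immediately started event, built from the enums above. FW_CTXT_ACTION_ADD comes from the context API header; the duration value is illustrative.

struct iwl_time_event_cmd cmd = {
	.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
	.id = cpu_to_le32(TE_BSS_STA_ASSOC),	/* event type, since this is ADD */
	.duration = cpu_to_le32(100),		/* TU, illustrative */
	.repeat = 1,
	.max_frags = TE_V2_FRAG_NONE,
	.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
			      TE_V2_NOTIF_HOST_EVENT_END |
			      TE_V2_START_IMMEDIATELY),
};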
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC,
+ * &enum iwl_ctxt_id_and_color
+ */
+struct iwl_time_event_resp {
+ __le32 status;
+ __le32 id;
+ __le32 unique_id;
+ __le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: &enum iwl_time_event_policy
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+ __le32 timestamp;
+ __le32 session_id;
+ __le32 unique_id;
+ __le32 id_and_color;
+ __le32 action;
+ __le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+/*
+ * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req
+ *
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ * time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ * timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req_tail {
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved;
+ __le32 apply_time;
+ __le32 apply_time_max_delay;
+ __le32 duration;
+} __packed;
+
+/*
+ * Aux ROC command
+ *
+ * Command requests the firmware to create a time event for a certain duration
+ * and remain on the given channel. This is done by using the Aux framework in
+ * the FW.
+ * The command was first used for Hot Spot issues, but can be used regardless
+ * of Hot Spot.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @event_unique_id: If the action is FW_CTXT_ACTION_REMOVE then the
+ * event_unique_id should be the id of the time event assigned by ucode.
+ * Otherwise ignore the event_unique_id.
+ * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
+ * activity.
+ * @channel_info: channel info
+ */
+struct iwl_hs20_roc_req {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 event_unique_id;
+ __le32 sta_id_and_color;
+ struct iwl_fw_channel_info channel_info;
+ struct iwl_hs20_roc_req_tail tail;
+} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
+
+/*
+ * values for AUX ROC result values
+ */
+enum iwl_mvm_hot_spot {
+ HOT_SPOT_RSP_STATUS_OK,
+ HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
+ HOT_SPOT_MAX_NUM_OF_SESSIONS,
+};
+
+/*
+ * Aux ROC command response
+ *
+ * In response to iwl_hs20_roc_req the FW sends this command to notify the
+ * driver of the UID of the time event.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @event_unique_id: Unique ID of time event assigned by ucode
+ * @status: Return status 0 is success, all the rest used for specific errors
+ */
+struct iwl_hs20_roc_res {
+ __le32 event_unique_id;
+ __le32 status;
+} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_session_prot_conf_id - session protection's configurations
+ * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented and with a very
+ * high priority. If everything goes well (99% of the cases) the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event which can be much longer (its duration is
+ * configurable by the driver) which has a slightly lower
+ * priority and that can be fragmented allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account
+ * the duration, the interval and the repetition count.
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ * @SESSION_PROTECT_CONF_MAX_ID: not used
+ */
+enum iwl_mvm_session_prot_conf_id {
+ SESSION_PROTECT_CONF_ASSOC,
+ SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+ SESSION_PROTECT_CONF_MAX_ID,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwl_mvm_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 conf_id;
+ __le32 duration_tu;
+ __le32 repetition_count;
+ __le32 interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end, even if the firmware could not schedule it.
+ */
+struct iwl_mvm_session_prot_notif {
+ __le32 mac_id;
+ __le32 status;
+ __le32 start;
+ __le32 conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
new file mode 100644
index 000000000..ecc6706f6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_tx_h__
+#define __iwl_fw_api_tx_h__
+#include <linux/ieee80211.h>
+
+/**
+ * enum iwl_tx_flags - bitmasks for tx_flags in TX command
+ * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame
+ * @TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ * Otherwise, use rate_n_flags from the TX command
+ * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ * Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested
+ * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_MASK: BT priority value
+ * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
+ * on old firmwares).
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ * Should be set for beacons and probe responses
+ * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ * Should be set for 26/30 length MAC headers
+ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwl_tx_flags {
+ TX_CMD_FLG_PROT_REQUIRE = BIT(0),
+ TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
+ TX_CMD_FLG_ACK = BIT(3),
+ TX_CMD_FLG_STA_RATE = BIT(4),
+ TX_CMD_FLG_BAR = BIT(6),
+ TX_CMD_FLG_TXOP_PROT = BIT(7),
+ TX_CMD_FLG_VHT_NDPA = BIT(8),
+ TX_CMD_FLG_HT_NDPA = BIT(9),
+ TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+ TX_CMD_FLG_BT_PRIO_POS = 11,
+ TX_CMD_FLG_BT_PRIO_MASK = BIT(11) | BIT(12),
+ TX_CMD_FLG_BT_DIS = BIT(12),
+ TX_CMD_FLG_SEQ_CTL = BIT(13),
+ TX_CMD_FLG_MORE_FRAG = BIT(14),
+ TX_CMD_FLG_TSF = BIT(16),
+ TX_CMD_FLG_CALIB = BIT(17),
+ TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
+ TX_CMD_FLG_MH_PAD = BIT(20),
+ TX_CMD_FLG_RESP_TO_DRV = BIT(21),
+ TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
+ TX_CMD_FLG_DUR = BIT(25),
+ TX_CMD_FLG_FW_DROP = BIT(26),
+ TX_CMD_FLG_EXEC_PAPD = BIT(27),
+ TX_CMD_FLG_PAPD_TYPE = BIT(28),
+ TX_CMD_FLG_HCCA_CHUNK = BIT(31)
+}; /* TX_FLAGS_BITS_API_S_VER_1 */
+
+/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+ IWL_TX_FLAGS_CMD_RATE = BIT(0),
+ IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
+ IWL_TX_FLAGS_HIGH_PRI = BIT(2),
+ /* Use these flags only from
+ * TX_FLAGS_BITS_API_S_VER_4 and above */
+ IWL_TX_FLAGS_RTS = BIT(3),
+ IWL_TX_FLAGS_CTS = BIT(4),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+ PM_FRAME_NONE = 0,
+ PM_FRAME_MGMT = 2,
+ PM_FRAME_ASSOC = 3,
+};
+
+#define TX_CMD_SEC_MSK 0x07
+#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+
+/**
+ * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command
+ * @TX_CMD_SEC_WEP: WEP encryption algorithm.
+ * @TX_CMD_SEC_CCM: CCM encryption algorithm.
+ * @TX_CMD_SEC_TKIP: TKIP encryption algorithm.
+ * @TX_CMD_SEC_EXT: extended cipher algorithm.
+ * @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
+ * @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * from the table instead of from the TX command.
+ * If the key is taken from the key table its index should be given by the
+ * first byte of the TX command key field.
+ */
+enum iwl_tx_cmd_sec_ctrl {
+ TX_CMD_SEC_WEP = 0x01,
+ TX_CMD_SEC_CCM = 0x02,
+ TX_CMD_SEC_TKIP = 0x03,
+ TX_CMD_SEC_EXT = 0x04,
+ TX_CMD_SEC_GCMP = 0x05,
+ TX_CMD_SEC_KEY128 = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
+};
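(Editorial sketch, not part of the patch.) For example, a WEP frame using a 104-bit key in slot 1 would compose the security control byte from the values above; the variable is illustrative only.

u8 sec_ctl = TX_CMD_SEC_WEP | TX_CMD_SEC_KEY128 |
	     ((1 << TX_CMD_SEC_WEP_KEY_IDX_POS) & TX_CMD_SEC_WEP_KEY_IDX_MSK);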
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT	2000000 /* 2000 ms */
+#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS 0
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY 15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWL_RTS_DFAULT_RETRY_LIMIT 60
+#define IWL_BAR_DFAULT_RETRY_LIMIT 60
+#define IWL_LOW_RETRY_LIMIT 7
+
+/**
+ * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ * from mac header end. For normal case it is 4 words for SNAP.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload in case there is tunneling such as
+ * IPv6 in IPv4, in such case the ip header offset should point to the
+ * inner ip header and IPv4 checksum of the external header should be
+ * calculated by driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ * alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+ TX_CMD_OFFLD_IP_HDR = 0,
+ TX_CMD_OFFLD_L4_EN = 6,
+ TX_CMD_OFFLD_L3_EN = 7,
+ TX_CMD_OFFLD_MH_SIZE = 8,
+ TX_CMD_OFFLD_PAD = 13,
+ TX_CMD_OFFLD_AMSDU = 14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
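(Editorial sketch, not part of the patch.) A possible way to compose an offload_assist word for a checksummed frame from the bit positions and masks above; mh_words (MAC header length in 16-bit words, including IV) and ip_hdr_words (offset to the IP header, in words past the MAC header) are hypothetical inputs, and the helper name is local to this sketch.

static __le16 iwl_build_offload_assist(u16 mh_words, u16 ip_hdr_words,
				       bool pad, bool amsdu)
{
	u16 v;

	v = (ip_hdr_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) << TX_CMD_OFFLD_IP_HDR;
	v |= BIT(TX_CMD_OFFLD_L3_EN) | BIT(TX_CMD_OFFLD_L4_EN);
	v |= (mh_words & IWL_TX_CMD_OFFLD_MH_MASK) << TX_CMD_OFFLD_MH_SIZE;
	if (pad)
		v |= BIT(TX_CMD_OFFLD_PAD);
	if (amsdu)
		v |= BIT(TX_CMD_OFFLD_AMSDU);

	return cpu_to_le16(v);
}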
+
+enum iwl_tx_offload_assist_bz {
+ IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff,
+ IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800,
+ IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000,
+ IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000,
+ IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000,
+ IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000,
+ IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000,
+ IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000,
+};
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @tx_flags: combination of TX_CMD_FLG_*, see &enum iwl_tx_flags
+ * @scratch: scratch buffer used by the device
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for initial TX attempt.
+ * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @reserved2: reserved
+ * @key: security key
+ * @reserved3: reserved
+ * @life_time: frame life time (usecs??)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ * btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @reserved4: reserved
+ * @payload: payload (same as @hdr)
+ * @hdr: 802.11 header (same as @payload)
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd {
+ __le16 len;
+ __le16 offload_assist;
+ __le32 tx_flags;
+ struct {
+ u8 try_cnt;
+ u8 btkill_cnt;
+ __le16 reserved;
+ } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
+ __le32 rate_n_flags;
+ u8 sta_id;
+ u8 sec_ctl;
+ u8 initial_rate_index;
+ u8 reserved2;
+ u8 key[16];
+ __le32 reserved3;
+ __le32 life_time;
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 tid_tspec;
+ __le16 pm_frame_timeout;
+ __le16 reserved4;
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
+} __packed; /* TX_CMD_API_S_VER_6 */
+
+struct iwl_dram_sec_info {
+ __le32 pn_low;
+ __le16 pn_high;
+ __le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen2 {
+ __le16 len;
+ __le16 offload_assist;
+ __le32 flags;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ struct ieee80211_hdr hdr[];
+} __packed; /* TX_CMD_API_S_VER_7,
+ TX_CMD_API_S_VER_9 */
+
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @reserved: reserved
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+ __le16 len;
+ __le16 flags;
+ __le32 offload_assist;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ u8 reserved[8];
+ struct ieee80211_hdr hdr[];
+} __packed; /* TX_CMD_API_S_VER_8,
+ TX_CMD_API_S_VER_10 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
+ * @TX_STATUS_SUCCESS:
+ * @TX_STATUS_DIRECT_DONE:
+ * @TX_STATUS_POSTPONE_DELAY:
+ * @TX_STATUS_POSTPONE_FEW_BYTES:
+ * @TX_STATUS_POSTPONE_BT_PRIO:
+ * @TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @TX_STATUS_POSTPONE_CALC_TTAK:
+ * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @TX_STATUS_FAIL_SHORT_LIMIT:
+ * @TX_STATUS_FAIL_LONG_LIMIT:
+ * @TX_STATUS_FAIL_UNDERRUN:
+ * @TX_STATUS_FAIL_DRAIN_FLOW:
+ * @TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @TX_STATUS_FAIL_DEST_PS:
+ * @TX_STATUS_FAIL_HOST_ABORTED:
+ * @TX_STATUS_FAIL_BT_RETRY:
+ * @TX_STATUS_FAIL_STA_INVALID:
+ * @TX_STATUS_FAIL_FRAG_DROPPED:
+ * @TX_STATUS_FAIL_TID_DISABLE:
+ * @TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @TX_STATUS_FAIL_FW_DROP:
+ * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ * STA table
+ * @TX_STATUS_INTERNAL_ABORT:
+ * @TX_MODE_MSK:
+ * @TX_MODE_NO_BURST:
+ * @TX_MODE_IN_BURST_SEQ:
+ * @TX_MODE_FIRST_IN_BURST:
+ * @TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwl_tx_status {
+ TX_STATUS_MSK = 0x000000ff,
+ TX_STATUS_SUCCESS = 0x01,
+ TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_STATUS_FAIL_UNDERRUN = 0x84,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_STATUS_FAIL_DEST_PS = 0x88,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+ TX_STATUS_INTERNAL_ABORT = 0x92,
+ TX_MODE_MSK = 0x00000f00,
+ TX_MODE_NO_BURST = 0x00000000,
+ TX_MODE_IN_BURST_SEQ = 0x00000100,
+ TX_MODE_FIRST_IN_BURST = 0x00000200,
+ TX_QUEUE_NUM_MSK = 0x0001f000,
+ TX_NARROW_BW_MSK = 0x00060000,
+ TX_NARROW_BW_1DIV2 = 0x00020000,
+ TX_NARROW_BW_1DIV4 = 0x00040000,
+ TX_NARROW_BW_1DIV8 = 0x00060000,
+};
+
+/*
+ * enum iwl_tx_agg_status - TX aggregation status
+ * @AGG_TX_STATE_STATUS_MSK:
+ * @AGG_TX_STATE_TRANSMITTED:
+ * @AGG_TX_STATE_UNDERRUN:
+ * @AGG_TX_STATE_BT_PRIO:
+ * @AGG_TX_STATE_FEW_BYTES:
+ * @AGG_TX_STATE_ABORT:
+ * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or
+ * BT detection
+ * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @AGG_TX_STATE_SCD_QUERY:
+ * @AGG_TX_STATE_TEST_BAD_CRC32:
+ * @AGG_TX_STATE_RESPONSE:
+ * @AGG_TX_STATE_DUMP_TX:
+ * @AGG_TX_STATE_DELAY_TX:
+ * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwl_tx_agg_status {
+ AGG_TX_STATE_STATUS_MSK = 0x00fff,
+ AGG_TX_STATE_TRANSMITTED = 0x000,
+ AGG_TX_STATE_UNDERRUN = 0x001,
+ AGG_TX_STATE_BT_PRIO = 0x002,
+ AGG_TX_STATE_FEW_BYTES = 0x004,
+ AGG_TX_STATE_ABORT = 0x008,
+ AGG_TX_STATE_TX_ON_AIR_DROP = 0x010,
+ AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+ AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+ AGG_TX_STATE_SCD_QUERY = 0x080,
+ AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+ AGG_TX_STATE_RESPONSE = 0x1ff,
+ AGG_TX_STATE_DUMP_TX = 0x200,
+ AGG_TX_STATE_DELAY_TX = 0x400,
+ AGG_TX_STATE_TRY_CNT_POS = 12,
+ AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
+};
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that we've
+ * written the bytes to the TXE, but we know nothing about what the DSP did.
+ */
+#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
+ AGG_TX_STATE_ABORT | \
+ AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at
+ * same rate. Rate scaling may have been used if first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct agg_tx_status - per packet TX aggregation status
+ * @status: See &enum iwl_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct agg_tx_status {
+ __le16 status;
+ __le16 sequence;
+} __packed;
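(Editorial sketch, not part of the patch.) An illustrative check built on AGG_TX_STAT_FRAME_NOT_SENT and the per-packet status struct above, deciding whether an MPDU of an aggregation definitely never reached the air; the helper name is hypothetical.

static bool iwl_agg_frame_not_sent(const struct agg_tx_status *st)
{
	return le16_to_cpu(st->status) & AGG_TX_STAT_FRAME_NOT_SENT;
}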
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
+#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+ TX_RES_RATE_TABLE_COLOR_POS)
+
+#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
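(Editorial sketch, not part of the patch.) A short decode of the packed ra_tid byte and the initial-rate-index byte using the accessors above; the function and parameter names are local to this sketch.

static void iwl_decode_tx_resp_fields(u8 ra_tid, u8 rate_idx_field)
{
	u8 tid = IWL_MVM_TX_RES_GET_TID(ra_tid);
	u8 sta_id = IWL_MVM_TX_RES_GET_RA(ra_tid);
	u8 rate_idx = rate_idx_field & TX_RES_INIT_RATE_INDEX_MSK;
	u8 color = TX_RES_RATE_TABLE_COL_GET(rate_idx_field);
	bool invalid = rate_idx_field & TX_RES_INV_RATE_INDEX_MSK;

	pr_debug("sta %u tid %u rate idx %u color %u%s\n",
		 sta_id, tid, rate_idx, color, invalid ? " (invalid)" : "");
}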
+
+/**
+ * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status TX_STATUS_*
+ * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count. Length in @frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp_v3 {
+ u8 frame_count;
+ u8 bt_kill_count;
+ u8 failure_rts;
+ u8 failure_frame;
+ __le32 initial_rate;
+ __le16 wireless_media_time;
+
+ u8 pa_status;
+ u8 pa_integ_res_a[3];
+ u8 pa_integ_res_b[3];
+ u8 pa_integ_res_c[3];
+ __le16 measurement_req_id;
+ u8 reduced_tpc;
+ u8 reserved;
+
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_info;
+ u8 ra_tid;
+ __le16 frame_ctrl;
+ struct agg_tx_status status[];
+} __packed; /* TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = tid, bits [7:4] = ra (per IWL_MVM_TX_RES_GET_TID/RA)
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @reserved2: reserved for padding/alignment
+ * @status: for non-agg: frame status TX_STATUS_*
+ * For version 6, a TX response isn't received for aggregation at all.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp {
+ u8 frame_count;
+ u8 bt_kill_count;
+ u8 failure_rts;
+ u8 failure_frame;
+ __le32 initial_rate;
+ __le16 wireless_media_time;
+
+ u8 pa_status;
+ u8 pa_integ_res_a[3];
+ u8 pa_integ_res_b[3];
+ u8 pa_integ_res_c[3];
+ __le16 measurement_req_id;
+ u8 reduced_tpc;
+ u8 reserved;
+
+ __le32 tfd_info;
+ __le16 seq_ctl;
+ __le16 byte_cnt;
+ u8 tlc_info;
+ u8 ra_tid;
+ __le16 frame_ctrl;
+ __le16 tx_queue;
+ __le16 reserved2;
+ struct agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_6,
+ TX_RSP_API_S_VER_7 */
+
+/**
+ * struct iwl_mvm_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @sta_addr: MAC address
+ * @reserved: reserved
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl: sequence control field
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if a rate other than the first
+ * was used for Tx-ing, the FW will set this value to 0.
+ * @reserved1: reserved
+ */
+struct iwl_mvm_ba_notif {
+ u8 sta_addr[ETH_ALEN];
+ __le16 reserved;
+
+ u8 sta_id;
+ u8 tid;
+ __le16 seq_ctl;
+ __le64 bitmap;
+ __le16 scd_flow;
+ __le16 scd_ssn;
+ u8 txed;
+ u8 txed_2_done;
+ u8 reduced_txp;
+ u8 reserved1;
+} __packed;
+
+/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
+ * @tid: TID of the queue (0-7)
+ * @reserved: reserved for alignment
+ */
+struct iwl_mvm_compressed_ba_tfd {
+ __le16 q_num;
+ __le16 tfd_index;
+ u8 scd_queue;
+ u8 tid;
+ u8 reserved[2];
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+ u8 q_num;
+ u8 tid;
+ __le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+ IWL_MVM_BA_RESP_TX_AGG,
+ IWL_MVM_BA_RESP_TX_BAR,
+ IWL_MVM_BA_RESP_TX_AGG_FAIL,
+ IWL_MVM_BA_RESP_TX_UNDERRUN,
+ IWL_MVM_BA_RESP_TX_BT_KILL,
+ IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if a rate other than the first
+ * was used for Tx-ing, the FW will set this value to 0.
+ * @tlc_rate_info: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @reserved: reserved (for alignment)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * for details. Length in @tfd_cnt.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
+ */
+struct iwl_mvm_compressed_ba_notif {
+ __le32 flags;
+ u8 sta_id;
+ u8 reduced_txp;
+ u8 tlc_rate_info;
+ u8 retry_cnt;
+ __le32 query_byte_cnt;
+ __le16 query_frame_cnt;
+ __le16 txed;
+ __le16 done;
+ __le16 reserved;
+ __le32 wireless_time;
+ __le32 tx_rate;
+ __le16 tfd_cnt;
+ __le16 ra_tid_cnt;
+ union {
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
+ };
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
+ COMPRESSED_BA_RES_API_S_VER_5 */
+
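+/*
+ * Illustrative sketch (assumption, not a definitive handler): the number of
+ * valid entries in the flexible array is carried in @tfd_cnt (or @ra_tid_cnt),
+ * so the per-queue progress updates would be consumed roughly as:
+ *
+ *	for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+ *		u16 q_num = le16_to_cpu(ba_res->tfd[i].q_num);
+ *		u16 tfd_index = le16_to_cpu(ba_res->tfd[i].tfd_index);
+ *		(reclaim frames on queue q_num up to tfd_index)
+ *	}
+ */
+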
+/**
+ * struct iwl_mac_beacon_cmd_v6 - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd_v6 {
+ struct iwl_tx_cmd tx;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ struct ieee80211_hdr frame[];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd_v7 {
+ struct iwl_tx_cmd tx;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ __le32 ecsa_offset;
+ __le32 csa_offset;
+ struct ieee80211_hdr frame[];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
+
+/* Bit flags for BEACON_TEMPLATE_CMD_API until version 10 */
+enum iwl_mac_beacon_flags_v1 {
+ IWL_MAC_BEACON_CCK_V1 = BIT(8),
+ IWL_MAC_BEACON_ANT_A_V1 = BIT(9),
+ IWL_MAC_BEACON_ANT_B_V1 = BIT(10),
+ IWL_MAC_BEACON_FILS_V1 = BIT(12),
+};
+
+/* Bit flags for BEACON_TEMPLATE_CMD_API version 11 and above */
+enum iwl_mac_beacon_flags {
+ IWL_MAC_BEACON_CCK = BIT(5),
+ IWL_MAC_BEACON_ANT_A = BIT(6),
+ IWL_MAC_BEACON_ANT_B = BIT(7),
+ IWL_MAC_BEACON_FILS = BIT(8),
+};
+
+/**
+ * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
+ * @byte_cnt: byte count of the beacon frame.
+ * @flags: least significant byte for rate code. The most significant byte
+ * is &enum iwl_mac_beacon_flags.
+ * @short_ssid: Short SSID
+ * @reserved: reserved
+ * @template_id: currently equal to the mac context id of the corresponding mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd {
+ __le16 byte_cnt;
+ __le16 flags;
+ __le32 short_ssid;
+ __le32 reserved;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ __le32 ecsa_offset;
+ __le32 csa_offset;
+ struct ieee80211_hdr frame[];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10,
+ BEACON_TEMPLATE_CMD_API_S_VER_11,
+ BEACON_TEMPLATE_CMD_API_S_VER_12 */
+
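+/*
+ * Illustrative sketch (assumption, not the driver's implementation): per the
+ * @flags description above, the rate code occupies the low bits and the
+ * IWL_MAC_BEACON_* bits (version dependent) are OR'd on top, roughly:
+ *
+ *	u16 flags = rate_code;			(hypothetical rate value)
+ *	if (beacon_uses_cck)
+ *		flags |= IWL_MAC_BEACON_CCK;
+ *	beacon_cmd.flags = cpu_to_le16(flags);
+ */
+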
+struct iwl_beacon_notif {
+ struct iwl_mvm_tx_resp beacon_notify_hdr;
+ __le64 tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
+/**
+ * struct iwl_extended_beacon_notif_v5 - notifies about beacon transmission
+ * @beacon_notify_hdr: tx response command associated with the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ * @gp2: last beacon time in gp2
+ */
+struct iwl_extended_beacon_notif_v5 {
+ struct iwl_mvm_tx_resp beacon_notify_hdr;
+ __le64 tsf;
+ __le32 ibss_mgr_status;
+ __le32 gp2;
+} __packed; /* BEACON_NTFY_API_S_VER_5 */
+
+/**
+ * struct iwl_extended_beacon_notif - notifies about beacon transmission
+ * @status: the status of the Tx response of the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ * @gp2: last beacon time in gp2
+ */
+struct iwl_extended_beacon_notif {
+ __le32 status;
+ __le64 tsf;
+ __le32 ibss_mgr_status;
+ __le32 gp2;
+} __packed; /* BEACON_NTFY_API_S_VER_6_ */
+
+/**
+ * enum iwl_dump_control - dump (flush) control flags
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+enum iwl_dump_control {
+ DUMP_TX_FIFO_FLUSH = BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd_v1 {
+ __le32 queues_ctl;
+ __le16 flush_ctl;
+ __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+ __le32 sta_id;
+ __le16 tid_mask;
+ __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
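+/*
+ * Illustrative sketch (assumption): flushing all TIDs of a single station with
+ * the v2 command would be filled roughly as:
+ *
+ *	struct iwl_tx_path_flush_cmd flush_cmd = {
+ *		.sta_id = cpu_to_le32(sta_id),
+ *		.tid_mask = cpu_to_le16(0xffff),
+ *	};
+ */
+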
+#define IWL_TX_FLUSH_QUEUE_RSP 16
+
+/**
+ * struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the TID of the flushed queue
+ * @queue_num: virtual queue id
+ * @read_before_flush: read pointer before flush
+ * @read_after_flush: read pointer after flush
+ */
+struct iwl_flush_queue_info {
+ __le16 tid;
+ __le16 queue_num;
+ __le16 read_before_flush;
+ __le16 read_after_flush;
+} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queues were flushed
+ * @num_flushed_queues: number of queues in queues array
+ * @queues: all flushed queues
+ */
+struct iwl_tx_path_flush_cmd_rsp {
+ __le16 sta_id;
+ __le16 num_flushed_queues;
+ struct iwl_flush_queue_info queues[IWL_TX_FLUSH_QUEUE_RSP];
+} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
+
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+ SCD_CFG_DISABLE_QUEUE = 0x0,
+ SCD_CFG_ENABLE_QUEUE = 0x1,
+ SCD_CFG_UPDATE_QUEUE_TID = 0x2,
+};
+
+/**
+ * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: unused
+ * @sta_id: station id
+ * @tid: TID
+ * @scd_queue: scheduler queue to configure
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ * Value is one of &enum iwl_scd_cfg_actions options
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: &enum iwl_mvm_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ * @reserved: reserved
+ */
+struct iwl_scd_txq_cfg_cmd {
+ u8 token;
+ u8 sta_id;
+ u8 tid;
+ u8 scd_queue;
+ u8 action;
+ u8 aggregate;
+ u8 tx_fifo;
+ u8 window;
+ __le16 ssn;
+ __le16 reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
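+/*
+ * Illustrative sketch (assumption, not the driver's implementation): enabling
+ * a queue for a given station/TID with this command would look roughly like:
+ *
+ *	struct iwl_scd_txq_cfg_cmd cmd = {
+ *		.scd_queue = queue,
+ *		.action = SCD_CFG_ENABLE_QUEUE,
+ *		.sta_id = sta_id,
+ *		.tid = tid,
+ *		.tx_fifo = IWL_MVM_TX_FIFO_BE,
+ *		.window = ba_window_size,
+ *		.ssn = cpu_to_le16(ssn),
+ *	};
+ */
+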
+/**
+ * struct iwl_scd_txq_cfg_rsp - response to the SCD queue config command
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwl_scd_txq_cfg_rsp {
+ u8 token;
+ u8 sta_id;
+ u8 tid;
+ u8 scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
new file mode 100644
index 000000000..e01894631
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_api_txq_h__
+#define __iwl_fw_api_txq_h__
+
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
+ * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
+ * monitor mode. Note this queue is the same as the queue for P2P device
+ * but we can't have active monitor mode along with P2P device anyway.
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ * that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ * Each MGMT queue is mapped to a single STA.
+ * MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ * responses
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ * DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ * as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+ IWL_MVM_DQA_CMD_QUEUE = 0,
+ IWL_MVM_DQA_AUX_QUEUE = 1,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+ IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
+ IWL_MVM_DQA_GCAST_QUEUE = 3,
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+ IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+ IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
+};
+
+enum iwl_mvm_tx_fifo {
+ IWL_MVM_TX_FIFO_BK = 0,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_MCAST = 5,
+ IWL_MVM_TX_FIFO_CMD = 7,
+};
+
+enum iwl_gen2_tx_fifo {
+ IWL_GEN2_TX_FIFO_CMD = 0,
+ IWL_GEN2_EDCA_TX_FIFO_BK,
+ IWL_GEN2_EDCA_TX_FIFO_BE,
+ IWL_GEN2_EDCA_TX_FIFO_VI,
+ IWL_GEN2_EDCA_TX_FIFO_VO,
+ IWL_GEN2_TRIG_TX_FIFO_BK,
+ IWL_GEN2_TRIG_TX_FIFO_BE,
+ IWL_GEN2_TRIG_TX_FIFO_VI,
+ IWL_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * enum iwl_tx_queue_cfg_actions - TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+enum iwl_tx_queue_cfg_actions {
+ TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+ TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+};
+
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
+#define IWL_DEFAULT_QUEUE_SIZE 256
+#define IWL_MGMT_QUEUE_SIZE 16
+#define IWL_CMD_QUEUE_SIZE 32
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+ u8 sta_id;
+ u8 tid;
+ __le16 flags;
+ __le32 cb_size;
+ __le64 byte_cnt_addr;
+ __le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
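+/*
+ * Illustrative sketch: per the @cb_size description above, the TFD ring size
+ * is encoded as its base-2 exponent minus 3, so a 256-entry ring would be
+ * requested with (ilog2() from <linux/log2.h>):
+ *
+ *	cmd.cb_size = cpu_to_le32(ilog2(256) - 3);	(== 5)
+ */
+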
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA-TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwl_tx_queue_cfg_rsp {
+ __le16 queue_number;
+ __le16 flags;
+ __le16 write_pointer;
+ __le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_txq_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
new file mode 100644
index 000000000..3ef0b776b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -0,0 +1,3152 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "iwl-fh.h"
+/**
+ * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
+ *
+ * @fwrt_ptr: pointer to the buffer coming from fwrt
+ * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
+ * transport's data.
+ * @fwrt_len: length of the valid data in fwrt_ptr
+ */
+struct iwl_fw_dump_ptrs {
+ struct iwl_trans_dump_data *trans_ptr;
+ void *fwrt_ptr;
+ u32 fwrt_len;
+};
+
+#define RADIO_REG_MAX_READ 0x2ad
+static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ u8 *pos = (void *)(*dump_data)->data;
+ int i;
+
+ IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return;
+
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
+ (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
+
+ for (i = 0; i < RADIO_REG_MAX_READ; i++) {
+ u32 rd_cmd = RADIO_RSP_RD_CMD;
+
+ rd_cmd |= i << RADIO_RSP_ADDR_POS;
+ iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
+ *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
+
+ pos++;
+ }
+
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+
+ iwl_trans_release_nic_access(fwrt->trans);
+}
+
+static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the RXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ RXF_RD_D_SPACE + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ RXF_RD_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ RXF_RD_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ RXF_RD_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ RXF_SET_FENCE_MODE + offset));
+
+ /* Lock fence */
+ iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+ /* Set fence pointer to the same place as the WR pointer */
+ iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
+ /* Set fence offset */
+ iwl_trans_write_prph(fwrt->trans,
+ RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (i = 0; i < fifo_len; i++)
+ fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
+ RXF_FIFO_RD_FENCE_INC +
+ offset);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the FIFO */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_FIFO_ITEM_CNT + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_LOCK_FENCE + offset));
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
+ TXF_WR_PTR + offset);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
+
+ /* Read FIFO */
+ for (i = 0; i < fifo_len / sizeof(u32); i++)
+ fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
+ TXF_READ_MODIFY_DATA +
+ offset);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ fifo_data, fifo_len);
+
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+
+ IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
+ /* Pull RXF1 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV +
+ fwrt->trans->trans_cfg->umac_prph_offset, 1);
+ /* Pull LMAC2 RXF1 */
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
+ }
+
+ iwl_trans_release_nic_access(fwrt->trans);
+}
+
+static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i, j;
+
+ IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
+ /* Pull TXF data from LMAC1 */
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[0].txfifo_size[i], 0, i);
+ }
+
+ /* Pull TXF data from LMAC2 */
+ if (fwrt->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+ i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_LARC_NUM +
+ LMAC2_PRPH_OFFSET, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
+ }
+ }
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ /* Pull UMAC internal TXF data from all TXFs */
+ for (i = 0;
+ i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
+ i++) {
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the internal FIFOs */
+ (*dump_data)->type =
+ cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+ (*dump_data)->len =
+ cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
+ fwrt->smem_cfg.num_txfifo_entries);
+
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_FIFO_ITEM_CNT));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_WR_PTR));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_RD_PTR));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_FENCE_PTR));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_LOCK_FENCE));
+
+ /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_CPU2_READ_MODIFY_ADDR,
+ TXF_CPU2_WR_PTR);
+
+ /* Dummy-read to advance the read pointer to head */
+ iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] =
+ iwl_trans_read_prph(fwrt->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+ }
+
+ iwl_trans_release_nic_access(fwrt->trans);
+}
+
+struct iwl_prph_range {
+ u32 start, end;
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
+ { .start = 0x00a00000, .end = 0x00a00000 },
+ { .start = 0x00a0000c, .end = 0x00a00024 },
+ { .start = 0x00a0002c, .end = 0x00a0003c },
+ { .start = 0x00a00410, .end = 0x00a00418 },
+ { .start = 0x00a00420, .end = 0x00a00420 },
+ { .start = 0x00a00428, .end = 0x00a00428 },
+ { .start = 0x00a00430, .end = 0x00a0043c },
+ { .start = 0x00a00444, .end = 0x00a00444 },
+ { .start = 0x00a004c0, .end = 0x00a004cc },
+ { .start = 0x00a004d8, .end = 0x00a004d8 },
+ { .start = 0x00a004e0, .end = 0x00a004f0 },
+ { .start = 0x00a00840, .end = 0x00a00840 },
+ { .start = 0x00a00850, .end = 0x00a00858 },
+ { .start = 0x00a01004, .end = 0x00a01008 },
+ { .start = 0x00a01010, .end = 0x00a01010 },
+ { .start = 0x00a01018, .end = 0x00a01018 },
+ { .start = 0x00a01024, .end = 0x00a01024 },
+ { .start = 0x00a0102c, .end = 0x00a01034 },
+ { .start = 0x00a0103c, .end = 0x00a01040 },
+ { .start = 0x00a01048, .end = 0x00a01094 },
+ { .start = 0x00a01c00, .end = 0x00a01c20 },
+ { .start = 0x00a01c58, .end = 0x00a01c58 },
+ { .start = 0x00a01c7c, .end = 0x00a01c7c },
+ { .start = 0x00a01c28, .end = 0x00a01c54 },
+ { .start = 0x00a01c5c, .end = 0x00a01c5c },
+ { .start = 0x00a01c60, .end = 0x00a01cdc },
+ { .start = 0x00a01ce0, .end = 0x00a01d0c },
+ { .start = 0x00a01d18, .end = 0x00a01d20 },
+ { .start = 0x00a01d2c, .end = 0x00a01d30 },
+ { .start = 0x00a01d40, .end = 0x00a01d5c },
+ { .start = 0x00a01d80, .end = 0x00a01d80 },
+ { .start = 0x00a01d98, .end = 0x00a01d9c },
+ { .start = 0x00a01da8, .end = 0x00a01da8 },
+ { .start = 0x00a01db8, .end = 0x00a01df4 },
+ { .start = 0x00a01dc0, .end = 0x00a01dfc },
+ { .start = 0x00a01e00, .end = 0x00a01e2c },
+ { .start = 0x00a01e40, .end = 0x00a01e60 },
+ { .start = 0x00a01e68, .end = 0x00a01e6c },
+ { .start = 0x00a01e74, .end = 0x00a01e74 },
+ { .start = 0x00a01e84, .end = 0x00a01e90 },
+ { .start = 0x00a01e9c, .end = 0x00a01ec4 },
+ { .start = 0x00a01ed0, .end = 0x00a01ee0 },
+ { .start = 0x00a01f00, .end = 0x00a01f1c },
+ { .start = 0x00a01f44, .end = 0x00a01ffc },
+ { .start = 0x00a02000, .end = 0x00a02048 },
+ { .start = 0x00a02068, .end = 0x00a020f0 },
+ { .start = 0x00a02100, .end = 0x00a02118 },
+ { .start = 0x00a02140, .end = 0x00a0214c },
+ { .start = 0x00a02168, .end = 0x00a0218c },
+ { .start = 0x00a021c0, .end = 0x00a021c0 },
+ { .start = 0x00a02400, .end = 0x00a02410 },
+ { .start = 0x00a02418, .end = 0x00a02420 },
+ { .start = 0x00a02428, .end = 0x00a0242c },
+ { .start = 0x00a02434, .end = 0x00a02434 },
+ { .start = 0x00a02440, .end = 0x00a02460 },
+ { .start = 0x00a02468, .end = 0x00a024b0 },
+ { .start = 0x00a024c8, .end = 0x00a024cc },
+ { .start = 0x00a02500, .end = 0x00a02504 },
+ { .start = 0x00a0250c, .end = 0x00a02510 },
+ { .start = 0x00a02540, .end = 0x00a02554 },
+ { .start = 0x00a02580, .end = 0x00a025f4 },
+ { .start = 0x00a02600, .end = 0x00a0260c },
+ { .start = 0x00a02648, .end = 0x00a02650 },
+ { .start = 0x00a02680, .end = 0x00a02680 },
+ { .start = 0x00a026c0, .end = 0x00a026d0 },
+ { .start = 0x00a02700, .end = 0x00a0270c },
+ { .start = 0x00a02804, .end = 0x00a02804 },
+ { .start = 0x00a02818, .end = 0x00a0281c },
+ { .start = 0x00a02c00, .end = 0x00a02db4 },
+ { .start = 0x00a02df4, .end = 0x00a02fb0 },
+ { .start = 0x00a03000, .end = 0x00a03014 },
+ { .start = 0x00a0301c, .end = 0x00a0302c },
+ { .start = 0x00a03034, .end = 0x00a03038 },
+ { .start = 0x00a03040, .end = 0x00a03048 },
+ { .start = 0x00a03060, .end = 0x00a03068 },
+ { .start = 0x00a03070, .end = 0x00a03074 },
+ { .start = 0x00a0307c, .end = 0x00a0307c },
+ { .start = 0x00a03080, .end = 0x00a03084 },
+ { .start = 0x00a0308c, .end = 0x00a03090 },
+ { .start = 0x00a03098, .end = 0x00a03098 },
+ { .start = 0x00a030a0, .end = 0x00a030a0 },
+ { .start = 0x00a030a8, .end = 0x00a030b4 },
+ { .start = 0x00a030bc, .end = 0x00a030bc },
+ { .start = 0x00a030c0, .end = 0x00a0312c },
+ { .start = 0x00a03c00, .end = 0x00a03c5c },
+ { .start = 0x00a04400, .end = 0x00a04454 },
+ { .start = 0x00a04460, .end = 0x00a04474 },
+ { .start = 0x00a044c0, .end = 0x00a044ec },
+ { .start = 0x00a04500, .end = 0x00a04504 },
+ { .start = 0x00a04510, .end = 0x00a04538 },
+ { .start = 0x00a04540, .end = 0x00a04548 },
+ { .start = 0x00a04560, .end = 0x00a0457c },
+ { .start = 0x00a04590, .end = 0x00a04598 },
+ { .start = 0x00a045c0, .end = 0x00a045f4 },
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
+ { .start = 0x00a05c00, .end = 0x00a05c18 },
+ { .start = 0x00a05400, .end = 0x00a056e8 },
+ { .start = 0x00a08000, .end = 0x00a098bc },
+ { .start = 0x00a02400, .end = 0x00a02758 },
+ { .start = 0x00a04764, .end = 0x00a0476c },
+ { .start = 0x00a04770, .end = 0x00a04774 },
+ { .start = 0x00a04620, .end = 0x00a04624 },
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
+ { .start = 0x00a00000, .end = 0x00a00000 },
+ { .start = 0x00a0000c, .end = 0x00a00024 },
+ { .start = 0x00a0002c, .end = 0x00a00034 },
+ { .start = 0x00a0003c, .end = 0x00a0003c },
+ { .start = 0x00a00410, .end = 0x00a00418 },
+ { .start = 0x00a00420, .end = 0x00a00420 },
+ { .start = 0x00a00428, .end = 0x00a00428 },
+ { .start = 0x00a00430, .end = 0x00a0043c },
+ { .start = 0x00a00444, .end = 0x00a00444 },
+ { .start = 0x00a00840, .end = 0x00a00840 },
+ { .start = 0x00a00850, .end = 0x00a00858 },
+ { .start = 0x00a01004, .end = 0x00a01008 },
+ { .start = 0x00a01010, .end = 0x00a01010 },
+ { .start = 0x00a01018, .end = 0x00a01018 },
+ { .start = 0x00a01024, .end = 0x00a01024 },
+ { .start = 0x00a0102c, .end = 0x00a01034 },
+ { .start = 0x00a0103c, .end = 0x00a01040 },
+ { .start = 0x00a01048, .end = 0x00a01050 },
+ { .start = 0x00a01058, .end = 0x00a01058 },
+ { .start = 0x00a01060, .end = 0x00a01070 },
+ { .start = 0x00a0108c, .end = 0x00a0108c },
+ { .start = 0x00a01c20, .end = 0x00a01c28 },
+ { .start = 0x00a01d10, .end = 0x00a01d10 },
+ { .start = 0x00a01e28, .end = 0x00a01e2c },
+ { .start = 0x00a01e60, .end = 0x00a01e60 },
+ { .start = 0x00a01e80, .end = 0x00a01e80 },
+ { .start = 0x00a01ea0, .end = 0x00a01ea0 },
+ { .start = 0x00a02000, .end = 0x00a0201c },
+ { .start = 0x00a02024, .end = 0x00a02024 },
+ { .start = 0x00a02040, .end = 0x00a02048 },
+ { .start = 0x00a020c0, .end = 0x00a020e0 },
+ { .start = 0x00a02400, .end = 0x00a02404 },
+ { .start = 0x00a0240c, .end = 0x00a02414 },
+ { .start = 0x00a0241c, .end = 0x00a0243c },
+ { .start = 0x00a02448, .end = 0x00a024bc },
+ { .start = 0x00a024c4, .end = 0x00a024cc },
+ { .start = 0x00a02508, .end = 0x00a02508 },
+ { .start = 0x00a02510, .end = 0x00a02514 },
+ { .start = 0x00a0251c, .end = 0x00a0251c },
+ { .start = 0x00a0252c, .end = 0x00a0255c },
+ { .start = 0x00a02564, .end = 0x00a025a0 },
+ { .start = 0x00a025a8, .end = 0x00a025b4 },
+ { .start = 0x00a025c0, .end = 0x00a025c0 },
+ { .start = 0x00a025e8, .end = 0x00a025f4 },
+ { .start = 0x00a02c08, .end = 0x00a02c18 },
+ { .start = 0x00a02c2c, .end = 0x00a02c38 },
+ { .start = 0x00a02c68, .end = 0x00a02c78 },
+ { .start = 0x00a03000, .end = 0x00a03000 },
+ { .start = 0x00a03010, .end = 0x00a03014 },
+ { .start = 0x00a0301c, .end = 0x00a0302c },
+ { .start = 0x00a03034, .end = 0x00a03038 },
+ { .start = 0x00a03040, .end = 0x00a03044 },
+ { .start = 0x00a03060, .end = 0x00a03068 },
+ { .start = 0x00a03070, .end = 0x00a03070 },
+ { .start = 0x00a0307c, .end = 0x00a03084 },
+ { .start = 0x00a0308c, .end = 0x00a03090 },
+ { .start = 0x00a03098, .end = 0x00a03098 },
+ { .start = 0x00a030a0, .end = 0x00a030a0 },
+ { .start = 0x00a030a8, .end = 0x00a030b4 },
+ { .start = 0x00a030bc, .end = 0x00a030c0 },
+ { .start = 0x00a030c8, .end = 0x00a030f4 },
+ { .start = 0x00a03100, .end = 0x00a0312c },
+ { .start = 0x00a03c00, .end = 0x00a03c5c },
+ { .start = 0x00a04400, .end = 0x00a04454 },
+ { .start = 0x00a04460, .end = 0x00a04474 },
+ { .start = 0x00a044c0, .end = 0x00a044ec },
+ { .start = 0x00a04500, .end = 0x00a04504 },
+ { .start = 0x00a04510, .end = 0x00a04538 },
+ { .start = 0x00a04540, .end = 0x00a04548 },
+ { .start = 0x00a04560, .end = 0x00a04560 },
+ { .start = 0x00a04570, .end = 0x00a0457c },
+ { .start = 0x00a04590, .end = 0x00a04590 },
+ { .start = 0x00a04598, .end = 0x00a04598 },
+ { .start = 0x00a045c0, .end = 0x00a045f4 },
+ { .start = 0x00a05c18, .end = 0x00a05c1c },
+ { .start = 0x00a0c000, .end = 0x00a0c018 },
+ { .start = 0x00a0c020, .end = 0x00a0c028 },
+ { .start = 0x00a0c038, .end = 0x00a0c094 },
+ { .start = 0x00a0c0c0, .end = 0x00a0c104 },
+ { .start = 0x00a0c10c, .end = 0x00a0c118 },
+ { .start = 0x00a0c150, .end = 0x00a0c174 },
+ { .start = 0x00a0c17c, .end = 0x00a0c188 },
+ { .start = 0x00a0c190, .end = 0x00a0c198 },
+ { .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
+ { .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = {
+ { .start = 0x00d03c00, .end = 0x00d03c64 },
+ { .start = 0x00d05c18, .end = 0x00d05c1c },
+ { .start = 0x00d0c000, .end = 0x00d0c174 },
+};
+
+static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+ u32 len_bytes, __le32 *data)
+{
+ u32 i;
+
+ for (i = 0; i < len_bytes; i += 4)
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+}
+
+static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len, void *ptr)
+{
+ struct iwl_fw_error_dump_prph *prph;
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_error_dump_data **data =
+ (struct iwl_fw_error_dump_data **)ptr;
+ u32 i;
+
+ if (!data)
+ return;
+
+ IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return;
+
+ for (i = 0; i < range_len; i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+ (*data)->len = cpu_to_le32(sizeof(*prph) +
+ num_bytes_in_chunk);
+ prph = (void *)(*data)->data;
+ prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
+
+ iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+ /* our range is inclusive, hence + 4 */
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4,
+ (void *)prph->data);
+
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+ iwl_trans_release_nic_access(trans);
+}
+
+/*
+ * alloc_sgtable - allocates a scatterlist table of the given size,
+ * fills it with pages and returns it
+ * @size: the size (in bytes) of the table
+ */
+static struct scatterlist *alloc_sgtable(int size)
+{
+ int alloc_size, nents, i;
+ struct page *new_page;
+ struct scatterlist *iter;
+ struct scatterlist *table;
+
+ nents = DIV_ROUND_UP(size, PAGE_SIZE);
+ table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+ sg_init_table(table, nents);
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ new_page = alloc_page(GFP_KERNEL);
+ if (!new_page) {
+ /* release all previous allocated pages in the table */
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ new_page = sg_page(iter);
+ if (new_page)
+ __free_page(new_page);
+ }
+ kfree(table);
+ return NULL;
+ }
+ alloc_size = min_t(int, size, PAGE_SIZE);
+ size -= PAGE_SIZE;
+ sg_set_page(iter, new_page, alloc_size, 0);
+ }
+ return table;
+}
+
+static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len, void *ptr)
+{
+ u32 *prph_len = (u32 *)ptr;
+ int i, num_bytes_in_chunk;
+
+ if (!prph_len)
+ return;
+
+ for (i = 0; i < range_len; i++) {
+ /* The range includes both boundaries */
+ num_bytes_in_chunk =
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+
+ *prph_len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+}
+
+static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
+ void (*handler)(struct iwl_fw_runtime *,
+ const struct iwl_prph_range *,
+ u32, void *))
+{
+ u32 range_len;
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
+ handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
+ } else if (fwrt->trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
+ handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
+ } else {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
+ handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
+
+ if (fwrt->trans->trans_cfg->mq_rx_supported) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
+ handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
+ }
+ }
+}
+
+static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data,
+ u32 len, u32 ofs, u32 type)
+{
+ struct iwl_fw_error_dump_mem *dump_mem;
+
+ if (!len)
+ return;
+
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+ dump_mem = (void *)(*dump_data)->data;
+ dump_mem->type = cpu_to_le32(type);
+ dump_mem->offset = cpu_to_le32(ofs);
+ iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs,
+ dump_mem->data, len);
+
+ IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+}
+
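+/*
+ * ADD_LEN() below sizes an optional dump chunk: it adds @item_len plus the
+ * fixed @const_len header, but accounts for the header only when @item_len is
+ * non-zero, so e.g. ADD_LEN(file_len, 0, hdr_len) leaves file_len unchanged.
+ */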
+#define ADD_LEN(len, item_len, const_len) \
+ do {size_t item = item_len; len += (!!item) * const_len + item; } \
+ while (0)
+
+static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+ size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ u32 fifo_len = 0;
+ int i;
+
+ if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
+ return 0;
+
+ /* Count RXF2 size */
+ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
+
+ /* Count RXF1 sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
+ for (i = 0; i < mem_cfg->num_lmacs; i++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
+
+ return fifo_len;
+}
+
+static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+ size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ u32 fifo_len = 0;
+ int i;
+
+ if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
+ goto dump_internal_txf;
+
+ /* Count TXF sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
+ hdr_len);
+ }
+
+dump_internal_txf:
+ if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
+ ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
+
+out:
+ return fifo_len;
+}
+
+static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **data)
+{
+ int i;
+
+ IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+ for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+ struct iwl_fw_error_dump_paging *paging;
+ struct page *pages =
+ fwrt->fw_paging_db[i].fw_paging_block;
+ dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+ (*data)->len = cpu_to_le32(sizeof(*paging) +
+ PAGING_BLOCK_SIZE);
+ paging = (void *)(*data)->data;
+ paging->index = cpu_to_le32(i);
+ dma_sync_single_for_cpu(fwrt->trans->dev, addr,
+ PAGING_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ memcpy(paging->data, page_address(pages),
+ PAGING_BLOCK_SIZE);
+ dma_sync_single_for_device(fwrt->trans->dev, addr,
+ PAGING_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ (*data) = iwl_fw_error_next_data(*data);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
+ fwrt->fw_paging_db[i].fw_offs,
+ paging->data,
+ PAGING_BLOCK_SIZE);
+ }
+}
+
+static struct iwl_fw_error_dump_file *
+iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump,
+ struct iwl_fwrt_dump_data *data)
+{
+ struct iwl_fw_error_dump_file *dump_file;
+ struct iwl_fw_error_dump_data *dump_data;
+ struct iwl_fw_error_dump_info *dump_info;
+ struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
+ struct iwl_fw_error_dump_trigger_desc *dump_trig;
+ u32 sram_len, sram_ofs;
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
+ u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
+ u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+ u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
+ 0 : fwrt->trans->cfg->dccm2_len;
+ int i;
+
+ /* SRAM - include stack CCM if driver knows the values for it */
+ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
+ const struct fw_img *img;
+
+ if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+ img = &fwrt->fw->img[fwrt->cur_fw_img];
+ sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ } else {
+ sram_ofs = fwrt->trans->cfg->dccm_offset;
+ sram_len = fwrt->trans->cfg->dccm_len;
+ }
+
+ /* reading RXF/TXF sizes */
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+ fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
+ fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
+
+ /* Make room for PRPH registers */
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
+ iwl_fw_prph_handler(fwrt, &prph_len,
+ iwl_fw_get_prph_len);
+
+ if (fwrt->trans->trans_cfg->device_family ==
+ IWL_DEVICE_FAMILY_7000 &&
+ iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
+ radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
+ }
+
+ file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ file_len += sizeof(*dump_data) + sizeof(*dump_info);
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
+ file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
+ size_t hdr_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_mem);
+
+ /* Dump SRAM only if no mem_tlvs */
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ ADD_LEN(file_len, sram_len, hdr_len);
+
+ /* Make room for all mem types that exist */
+ ADD_LEN(file_len, smem_len, hdr_len);
+ ADD_LEN(file_len, sram2_len, hdr_len);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
+ ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
+ }
+
+ /* Make room for fw's virtual image pages, if it exists */
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
+ file_len += fwrt->num_of_paging_blk *
+ (sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_paging) +
+ PAGING_BLOCK_SIZE);
+
+ if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+ file_len += sizeof(*dump_data) +
+ fwrt->trans->cfg->d3_debug_data_length * 2;
+ }
+
+ /* If we only want a monitor dump, reset the file length */
+ if (data->monitor_only) {
+ file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
+ sizeof(*dump_info) + sizeof(*dump_smem_cfg);
+ }
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ data->desc)
+ file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
+ data->desc->len;
+
+ dump_file = vzalloc(file_len);
+ if (!dump_file)
+ return NULL;
+
+ fw_error_dump->fwrt_ptr = dump_file;
+
+ dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+ dump_data = (void *)dump_file->data;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_info));
+ dump_info = (void *)dump_data->data;
+ dump_info->hw_type =
+ cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
+ dump_info->hw_step =
+ cpu_to_le32(fwrt->trans->hw_rev_step);
+ memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+ sizeof(dump_info->fw_human_readable));
+ strncpy(dump_info->dev_human_readable, fwrt->trans->name,
+ sizeof(dump_info->dev_human_readable) - 1);
+ strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable) - 1);
+ dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
+ dump_info->lmac_err_id[0] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[0]);
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ dump_info->lmac_err_id[1] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[1]);
+ dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ /* Dump shared memory configuration */
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+ dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+ dump_smem_cfg = (void *)dump_data->data;
+ dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+ dump_smem_cfg->num_txfifo_entries =
+ cpu_to_le32(mem_cfg->num_txfifo_entries);
+ for (i = 0; i < MAX_NUM_LMAC; i++) {
+ int j;
+ u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+ for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+ dump_smem_cfg->lmac[i].txfifo_size[j] =
+ cpu_to_le32(txf_size[j]);
+ dump_smem_cfg->lmac[i].rxfifo1_size =
+ cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+ }
+ dump_smem_cfg->rxfifo2_size =
+ cpu_to_le32(mem_cfg->rxfifo2_size);
+ dump_smem_cfg->internal_txfifo_addr =
+ cpu_to_le32(mem_cfg->internal_txfifo_addr);
+ for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+ dump_smem_cfg->internal_txfifo_size[i] =
+ cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ }
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ /* We only dump the FIFOs if the FW is in error state */
+ if (fifo_len) {
+ iwl_fw_dump_rxf(fwrt, &dump_data);
+ iwl_fw_dump_txf(fwrt, &dump_data);
+ }
+
+ if (radio_len)
+ iwl_read_radio_regs(fwrt, &dump_data);
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ data->desc) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
+ data->desc->len);
+ dump_trig = (void *)dump_data->data;
+ memcpy(dump_trig, &data->desc->trig_desc,
+ sizeof(*dump_trig) + data->desc->len);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ /* In case we only want a monitor dump, skip to dumping the transport data */
+ if (data->monitor_only)
+ goto out;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
+ fwrt->fw->dbg.mem_tlv;
+
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+
+ iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
+ le32_to_cpu(fw_dbg_mem[i].data_type));
+ }
+
+ iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
+ fwrt->trans->cfg->smem_offset,
+ IWL_FW_ERROR_DUMP_MEM_SMEM);
+
+ iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
+ fwrt->trans->cfg->dccm2_offset,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
+ }
+
+ if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+ u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
+ size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+ dump_data->len = cpu_to_le32(data_size * 2);
+
+ memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
+
+ kfree(fwrt->dump.d3_debug_data);
+ fwrt->dump.d3_debug_data = NULL;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, addr,
+ dump_data->data + data_size,
+ data_size);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr,
+ dump_data->data + data_size,
+ data_size);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ /* Dump fw's virtual image */
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
+ iwl_dump_paging(fwrt, &dump_data);
+
+ if (prph_len)
+ iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
+
+out:
+ dump_file->file_len = cpu_to_le32(file_len);
+ return dump_file;
+}
+
+/**
+ * struct iwl_dump_ini_region_data - region data
+ * @reg_tlv: region TLV
+ * @dump_data: dump data
+ */
+struct iwl_dump_ini_region_data {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fwrt_dump_data *dump_data;
+};
+
+static int
+iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 prph_val;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+ int i;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ prph_val = iwl_read_prph(fwrt->trans, addr + i);
+ if ((prph_val & ~0xf) == 0xa5a5a5a0)
+ return -EBUSY;
+ *val++ = cpu_to_le32(prph_val);
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1;
+ u32 indirect_rd_addr = WMAL_MRSPF_1;
+ u32 prph_val;
+ u32 addr = le32_to_cpu(reg->addrs[idx]);
+ u32 dphy_state;
+ u32 dphy_addr;
+ int i;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ indirect_wr_addr = WMAL_INDRCT_CMD1;
+
+ indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset);
+ indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset);
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return -EBUSY;
+
+ dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW :
+ WFPM_LMAC1_PS_CTL_RW;
+ dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr);
+
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ if (dphy_state == HBUS_TIMEOUT ||
+ (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) !=
+ WFPM_PHYRF_STATE_ON) {
+ *val++ = cpu_to_le32(WFPM_DPHY_OFF);
+ continue;
+ }
+
+ iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr,
+ WMAL_INDRCT_CMD(addr + i));
+ prph_val = iwl_read_prph_no_grab(fwrt->trans,
+ indirect_rd_addr);
+ *val++ = cpu_to_le32(prph_val);
+ }
+
+ iwl_trans_release_nic_access(fwrt->trans);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+ int i;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4)
+ *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+ int i;
+
+ /* we shouldn't get here if the trans doesn't have read_config32 */
+ if (WARN_ON_ONCE(!trans->ops->read_config32))
+ return -EOPNOTSUPP;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ int ret;
+ u32 tmp;
+
+ ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ if (ret < 0)
+ return ret;
+
+ *val++ = cpu_to_le32(tmp);
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(reg->dev_addr.size));
+
+ if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM &&
+ fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ range->data,
+ le32_to_cpu(reg->dev_addr.size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
+ u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
+
+ range->page_num = cpu_to_le32(idx);
+ range->range_data_size = cpu_to_le32(page_size);
+ dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
+ DMA_BIDIRECTIONAL);
+ memcpy(range->data, page_address(page), page_size);
+ dma_sync_single_for_device(fwrt->trans->dev, addr, page_size,
+ DMA_BIDIRECTIONAL);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range;
+ u32 page_size;
+
+ /* all paging indices start from 1 to skip the CSS section */
+ idx++;
+
+ if (!fwrt->trans->trans_cfg->gen2)
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
+
+ range = range_ptr;
+ page_size = fwrt->trans->init_dram.paging[idx].size;
+
+ range->page_num = cpu_to_le32(idx);
+ range->range_data_size = cpu_to_le32(page_size);
+ memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
+ page_size);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_dram_data *frag;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
+
+ frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx];
+
+ range->dram_base_addr = cpu_to_le64(frag->physical);
+ range->range_data_size = cpu_to_le32(frag->size);
+
+ memcpy(range->data, frag->block, frag->size);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(reg->internal_buffer.base_addr);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->internal_buffer.size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(reg->internal_buffer.size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
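+/*
+ * Stateful TX FIFO iterator: state is kept in fwrt->dump.txf_iter_data and
+ * is reset when idx is 0. It first walks the TX FIFOs of the selected LMAC
+ * that are set in the fid bitmap and then, if the firmware advertises the
+ * extended shared memory config, the internal TX FIFOs. Returns true while
+ * iter->fifo/iter->fifo_size describe a valid FIFO to dump.
+ */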
+static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+ int txf_num = cfg->num_txfifo_entries;
+ int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
+ u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]);
+
+ if (!idx) {
+ if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) {
+ IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n",
+ le32_to_cpu(reg->fifos.offset));
+ return false;
+ }
+
+ iter->internal_txf = 0;
+ iter->fifo_size = 0;
+ iter->fifo = -1;
+ if (le32_to_cpu(reg->fifos.offset))
+ iter->lmac = 1;
+ else
+ iter->lmac = 0;
+ }
+
+ if (!iter->internal_txf) {
+ for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
+ iter->fifo_size =
+ cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
+ if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
+ return true;
+ }
+ iter->fifo--;
+ }
+
+ iter->internal_txf = 1;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ return false;
+
+ for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) {
+ iter->fifo_size =
+ cfg->internal_txfifo_size[iter->fifo - txf_num];
+ if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
+ return true;
+ }
+
+ return false;
+}
+
+static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
+ __le32 *data;
+ int i;
+
+ if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
+ return -EIO;
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return -EBUSY;
+
+ range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
+ range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
+
+ iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
+
+ /*
+ * Read the TXF registers. For each register, write its address and
+ * value to the dump.
+ */
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
+
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
+ }
+
+ if (reg->fifos.hdr_only) {
+ range->range_data_size = cpu_to_le32(registers_size);
+ goto out;
+ }
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs,
+ TXF_WR_PTR + offs);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs);
+
+ /* Read FIFO */
+ addr = TXF_READ_MODIFY_DATA + offs;
+ data = (void *)reg_dump;
+ for (i = 0; i < iter->fifo_size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ reg_dump, iter->fifo_size);
+
+out:
+ iwl_trans_release_nic_access(fwrt->trans);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+struct iwl_ini_rxf_data {
+ u32 fifo_num;
+ u32 size;
+ u32 offset;
+};
+
+static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ struct iwl_ini_rxf_data *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 fid1 = le32_to_cpu(reg->fifos.fid[0]);
+ u32 fid2 = le32_to_cpu(reg->fifos.fid[1]);
+ u8 fifo_idx;
+
+ if (!data)
+ return;
+
+ memset(data, 0, sizeof(*data));
+
+ /* make sure only one bit is set in only one fid */
+ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
+ "fid1=%x, fid2=%x\n", fid1, fid2))
+ return;
+
+ if (fid1) {
+ fifo_idx = ffs(fid1) - 1;
+ if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
+ fifo_idx))
+ return;
+
+ data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size;
+ data->fifo_num = fifo_idx;
+ } else {
+ u8 max_idx;
+
+ fifo_idx = ffs(fid2) - 1;
+ if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
+ SHARED_MEM_CFG_CMD, 0) <= 3)
+ max_idx = 0;
+ else
+ max_idx = 1;
+
+ if (WARN_ONCE(fifo_idx > max_idx,
+ "invalid umac fifo idx %d", fifo_idx))
+ return;
+
+ /* use bit 31 to distinguish between umac and lmac rxf while
+ * parsing the dump
+ */
+ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT;
+
+ switch (fifo_idx) {
+ case 0:
+ data->size = fwrt->smem_cfg.rxfifo2_size;
+ data->offset = iwl_umac_prph(fwrt->trans,
+ RXF_DIFF_FROM_PREV);
+ break;
+ case 1:
+ data->size = fwrt->smem_cfg.rxfifo2_control_size;
+ data->offset = iwl_umac_prph(fwrt->trans,
+ RXF2C_DIFF_FROM_PREV);
+ break;
+ }
+ }
+}
+
+static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_ini_rxf_data rxf_data;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
+ u32 offs = le32_to_cpu(reg->fifos.offset), addr;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 registers_size = registers_num * sizeof(*reg_dump);
+ __le32 *data;
+ int i;
+
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
+ if (!rxf_data.size)
+ return -EIO;
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return -EBUSY;
+
+ range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
+ range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
+ range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
+
+ /*
+ * Read the RXF registers. For each register, write its address and
+ * value to the dump.
+ */
+ for (i = 0; i < registers_num; i++) {
+ addr = le32_to_cpu(reg->addrs[i]) + offs;
+
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
+ }
+
+ if (reg->fifos.hdr_only) {
+ range->range_data_size = cpu_to_le32(registers_size);
+ goto out;
+ }
+
+ offs = rxf_data.offset;
+
+ /* Lock fence */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
+ /* Set the fence pointer to the same place as the WR pointer */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1);
+ /* Set fence offset */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs,
+ 0x0);
+
+ /* Read FIFO */
+ addr = RXF_FIFO_RD_FENCE_INC + offs;
+ data = (void *)reg_dump;
+ for (i = 0; i < rxf_data.size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+
+out:
+ iwl_trans_release_nic_access(fwrt->trans);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(err_table->base_addr) +
+ le32_to_cpu(err_table->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = err_table->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(err_table->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_special_device_memory *special_mem =
+ &reg->special_mem;
+
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(special_mem->base_addr) +
+ le32_to_cpu(special_mem->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = special_mem->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(special_mem->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 prph_data;
+ int i;
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return -EBUSY;
+
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
+ prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
+ DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
+ DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
+ if ((prph_data & ~0xf) == 0xa5a5a5a0) {
+ iwl_trans_release_nic_access(fwrt->trans);
+ return -EBUSY;
+ }
+ *val++ = cpu_to_le32(prph_data);
+ }
+ iwl_trans_release_nic_access(fwrt->trans);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
+ u32 pkt_len;
+
+ if (!pkt)
+ return -EIO;
+
+ pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr));
+ range->range_data_size = cpu_to_le32(pkt_len);
+
+ memcpy(range->data, pkt->data, pkt_len);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ /* read the IMR memory and DMA it to SRAM */
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
+ u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
+ u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+ u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
+
+ range->range_data_size = cpu_to_le32(size_to_dump);
+ if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
+ imr_curr_addr, size_to_dump)) {
+ IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
+ return -1;
+ }
+
+ fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
+ fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
+ size_to_dump);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static void *
+iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return dump->data;
+}
+
+/**
+ * mask_apply_and_normalize - apply a mask to a value and normalize the result
+ *
+ * The normalization is based on the position of the first set bit in the mask.
+ *
+ * @val: value
+ * @mask: mask to apply and to normalize with
+ */
+static u32 mask_apply_and_normalize(u32 val, u32 mask)
+{
+ return (val & mask) >> (ffs(mask) - 1);
+}
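+/*
+ * For example, mask_apply_and_normalize(0x1234, 0x00f0) masks the value to
+ * 0x30 and shifts it right by ffs(0x00f0) - 1 = 4, returning 0x3.
+ */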
+
+static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ const struct iwl_fw_mon_reg *reg_info)
+{
+ u32 val, offs;
+
+ /* The header address of DBGCi is calculated as follows:
+ * DBGC1 address + (0x100 * i)
+ */
+ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
+
+ if (!reg_info || !reg_info->addr || !reg_info->mask)
+ return 0;
+
+ val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs);
+
+ return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask));
+}
+
+static void *
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ struct iwl_fw_ini_monitor_dump *data,
+ const struct iwl_fw_mon_regs *addrs)
+{
+ if (!iwl_trans_grab_nic_access(fwrt->trans)) {
+ IWL_ERR(fwrt, "Failed to get monitor header\n");
+ return NULL;
+ }
+
+ data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->write_ptr);
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ u32 wrt_ptr = le32_to_cpu(data->write_ptr);
+
+ data->write_ptr = cpu_to_le32(wrt_ptr >> 2);
+ }
+ data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cycle_cnt);
+ data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
+ &addrs->cur_frag);
+
+ iwl_trans_release_nic_access(fwrt->trans);
+
+ data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return data->data;
+}
+
+static void *
+iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
+
+ return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
+ &fwrt->trans->cfg->mon_dram_regs);
+}
+
+static void *
+iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
+
+ return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
+ &fwrt->trans->cfg->mon_smem_regs);
+}
+
+static void *
+iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+
+ return iwl_dump_ini_mon_fill_header(fwrt,
+ /* no offset calculation later */
+ IWL_FW_INI_ALLOCATION_ID_DBGC1,
+ mon_dump,
+ &fwrt->trans->cfg->mon_dbgi_regs);
+}
+
+static void *
+iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_err_table_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->version = reg->err_table.version;
+
+ return dump->data;
+}
+
+static void *
+iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_special_device_memory *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->type = reg->special_mem.type;
+ dump->version = reg->special_mem.version;
+
+ return dump->data;
+}
+
+static void *
+iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return dump->data;
+}
+
+static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+
+ return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+}
+
+static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ if (fwrt->trans->trans_cfg->gen2) {
+ if (fwrt->trans->init_dram.paging_cnt)
+ return fwrt->trans->init_dram.paging_cnt - 1;
+ else
+ return 0;
+ }
+
+ return fwrt->num_of_paging_blk;
+}
+
+static u32
+iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ if (!fw_mon->frags[i].size)
+ break;
+
+ ranges++;
+ }
+
+ return ranges;
+}
+
+static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 num_of_fifos = 0;
+
+ while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos))
+ num_of_fifos++;
+
+ return num_of_fifos;
+}
+
+static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ return 1;
+}
+
+static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ /* range is the total number of pages that need to be copied from
+ * IMR memory to SRAM and later from SRAM to DRAM
+ */
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return 0;
+ }
+
+ return (imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size);
+}
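+/*
+ * Example: with an imr_size of 3 MB and an sram_size of 1 MB the IMR region
+ * is dumped as 3 ranges; a 3.5 MB IMR with the same SRAM size needs 4 ranges,
+ * since the remainder is copied in a final, shorter chunk.
+ */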
+
+static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_error_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
+static u32
+iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ int i;
+ u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
+ u32 size = sizeof(struct iwl_fw_ini_error_dump);
+
+ /* start from 1 to skip the CSS section */
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
+ size += range_header_len;
+ if (fwrt->trans->trans_cfg->gen2)
+ size += fwrt->trans->init_dram.paging[i].size;
+ else
+ size += fwrt->fw_paging_db[i].fw_paging_size;
+ }
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_mon *fw_mon;
+ u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ int i;
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ for (i = 0; i < fw_mon->num_frags; i++) {
+ struct iwl_dram_data *frag = &fw_mon->frags[i];
+
+ if (!frag->size)
+ break;
+
+ size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size;
+ }
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_monitor_dump);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size;
+
+ size = le32_to_cpu(reg->internal_buffer.size);
+ if (!size)
+ return 0;
+
+ size += sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
+static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 size = 0;
+ u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) +
+ registers_num *
+ sizeof(struct iwl_fw_ini_error_dump_register);
+
+ while (iwl_ini_txf_iter(fwrt, reg_data, size)) {
+ size += fifo_hdr;
+ if (!reg->fifos.hdr_only)
+ size += iter->fifo_size;
+ }
+
+ if (!size)
+ return 0;
+
+ return size + sizeof(struct iwl_fw_ini_error_dump);
+}
+
+static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_ini_rxf_data rx_data;
+ u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
+ u32 size = sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range) +
+ registers_num * sizeof(struct iwl_fw_ini_error_dump_register);
+
+ if (reg->fifos.hdr_only)
+ return size;
+
+ iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data);
+ size += rx_data.size;
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->err_table.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_err_table_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->special_mem.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_special_device_memory) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+
+ if (!reg_data->dump_data->fw_pkt)
+ return 0;
+
+ size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt);
+ if (size)
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
+iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+ u32 ranges = 0;
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return size;
+ }
+ size = imr_size;
+ ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
+ if (!size && !ranges) {
+ IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
+ return 0;
+ }
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+ return size;
+}
+
+/**
+ * struct iwl_dump_ini_mem_ops - ini memory dump operations
+ * @get_num_of_ranges: returns the number of memory ranges in the region.
+ * @get_size: returns the total size of the region.
+ * @fill_mem_hdr: fills region type specific headers and returns pointer to
+ * the first range or NULL if failed to fill headers.
+ * @fill_range: copies a given memory range into the dump.
+ * Returns the size of the range or negative error value otherwise.
+ */
+struct iwl_dump_ini_mem_ops {
+ u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data);
+ u32 (*get_size)(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data);
+ void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len);
+ int (*fill_range)(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range, u32 range_len, int idx);
+};
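+/*
+ * iwl_dump_ini_mem() below drives these ops: get_size() and
+ * get_num_of_ranges() size the TLV, fill_mem_hdr() writes the region
+ * specific header and returns a pointer to the first range, and
+ * fill_range() is then called once per range.
+ */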
+
+/**
+ * iwl_dump_ini_mem - dump a single memory region
+ *
+ * Creates a dump TLV and copies a memory region into it.
+ * Returns the size of the current dump TLV, or 0 on failure.
+ *
+ * @fwrt: fw runtime struct
+ * @list: list to add the dump tlv to
+ * @reg_data: memory region
+ * @ops: memory dump operations
+ */
+static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
+ struct iwl_dump_ini_region_data *reg_data,
+ const struct iwl_dump_ini_mem_ops *ops)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_dump_entry *entry;
+ struct iwl_fw_ini_error_dump_data *tlv;
+ struct iwl_fw_ini_error_dump_header *header;
+ u32 type = reg->type;
+ u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
+ u32 num_of_ranges, i, size;
+ u8 *range;
+ u32 free_size;
+ u64 header_size;
+ u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+ dump_policy, id, type);
+
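+ /*
+ * From region TLV version 2 the region id also carries dump-policy
+ * bits; skip the region if its policy does not allow collection at
+ * the current dump verbosity (dump_policy is always VERBOSE here).
+ */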
+ if (le32_to_cpu(reg->hdr.version) >= 2) {
+ u32 dp = le32_get_bits(reg->id,
+ IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+ if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+ !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ }
+ }
+
+ if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
+ !ops->fill_range) {
+ IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
+ return 0;
+ }
+
+ size = ops->get_size(fwrt, reg_data);
+
+ if (size < sizeof(*header)) {
+ IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
+ return 0;
+ }
+
+ entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
+ if (!entry)
+ return 0;
+
+ entry->size = sizeof(*tlv) + size;
+
+ tlv = (void *)entry->data;
+ tlv->type = reg->type;
+ tlv->sub_type = reg->sub_type;
+ tlv->sub_type_ver = reg->sub_type_ver;
+ tlv->reserved = reg->reserved;
+ tlv->len = cpu_to_le32(size);
+
+ num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
+
+ header = (void *)tlv->data;
+ header->region_id = cpu_to_le32(id);
+ header->num_of_ranges = cpu_to_le32(num_of_ranges);
+ header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
+ memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
+
+ free_size = size;
+ range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
+ if (!range) {
+ IWL_ERR(fwrt,
+ "WRT: Failed to fill region header: id=%d, type=%d\n",
+ id, type);
+ goto out_err;
+ }
+
+ header_size = range - (u8 *)header;
+
+ if (WARN(header_size > free_size,
+ "header size %llu > free_size %d",
+ header_size, free_size)) {
+ IWL_ERR(fwrt,
+ "WRT: fill_mem_hdr used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= header_size;
+
+ for (i = 0; i < num_of_ranges; i++) {
+ int range_size = ops->fill_range(fwrt, reg_data, range,
+ free_size, i);
+
+ if (range_size < 0) {
+ IWL_ERR(fwrt,
+ "WRT: Failed to dump region: id=%d, type=%d\n",
+ id, type);
+ goto out_err;
+ }
+
+ if (WARN(range_size > free_size, "range_size %d > free_size %d",
+ range_size, free_size)) {
+ IWL_ERR(fwrt,
+ "WRT: fill_raged used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= range_size;
+ range = range + range_size;
+ }
+
+ list_add_tail(&entry->list, list);
+
+ return entry->size;
+
+out_err:
+ vfree(entry);
+
+ return 0;
+}
+
+static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *trigger,
+ struct list_head *list)
+{
+ struct iwl_fw_ini_dump_entry *entry;
+ struct iwl_fw_error_dump_data *tlv;
+ struct iwl_fw_ini_dump_info *dump;
+ struct iwl_dbg_tlv_node *node;
+ struct iwl_fw_ini_dump_cfg_name *cfg_name;
+ u32 size = sizeof(*tlv) + sizeof(*dump);
+ u32 num_of_cfg_names = 0;
+ u32 hw_type;
+
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ size += sizeof(*cfg_name);
+ num_of_cfg_names++;
+ }
+
+ entry = vzalloc(sizeof(*entry) + size);
+ if (!entry)
+ return 0;
+
+ entry->size = size;
+
+ tlv = (void *)entry->data;
+ tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
+ tlv->len = cpu_to_le32(size - sizeof(*tlv));
+
+ dump = (void *)tlv->data;
+
+ dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->time_point = trigger->time_point;
+ dump->trigger_reason = trigger->trigger_reason;
+ dump->external_cfg_state =
+ cpu_to_le32(fwrt->trans->dbg.external_ini_cfg);
+
+ dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
+ dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
+
+ dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
+
+ /*
+ * Several HWs all have type == 0x42, so we'll override this value
+ * according to the detected HW
+ */
+ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
+ if (hw_type == IWL_AX210_HW_TYPE) {
+ u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
+ u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
+ u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
+ u32 masked_bits = is_jacket | (is_cdb << 1);
+
+ /*
+ * The HW type depends on certain bits in this case, so add
+ * these bits to the HW type. We won't have collisions since we
+ * add these bits after the highest possible bit in the mask.
+ */
+ hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT;
+ }
+ dump->hw_type = cpu_to_le32(hw_type);
+
+ dump->rf_id_flavor =
+ cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
+ dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id));
+ dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id));
+ dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id));
+
+ dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major);
+ dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor);
+ dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
+ dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
+
+ dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
+ dump->regions_mask = trigger->regions_mask &
+ ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk);
+
+ dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
+ memcpy(dump->build_tag, fwrt->fw->human_readable,
+ sizeof(dump->build_tag));
+
+ cfg_name = dump->cfg_names;
+ dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names);
+ list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
+ struct iwl_fw_ini_debug_info_tlv *debug_info =
+ (void *)node->tlv.data;
+
+ cfg_name->image_type = debug_info->image_type;
+ cfg_name->cfg_name_len =
+ cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
+ sizeof(cfg_name->cfg_name));
+ cfg_name++;
+ }
+
+ /* add dump info TLV to the beginning of the list since it needs to be
+ * the first TLV in the dump
+ */
+ list_add(&entry->list, list);
+
+ return entry->size;
+}
+
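+/*
+ * Per-region-type dump operations, indexed by the IWL_FW_INI_REGION_* type.
+ * Types without ops here are rejected by the ops check in iwl_dump_ini_mem().
+ */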
+static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
+ [IWL_FW_INI_REGION_INVALID] = {},
+ [IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_mon_smem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
+ .fill_range = iwl_dump_ini_mon_smem_iter,
+ },
+ [IWL_FW_INI_REGION_DRAM_BUFFER] = {
+ .get_num_of_ranges = iwl_dump_ini_mon_dram_ranges,
+ .get_size = iwl_dump_ini_mon_dram_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header,
+ .fill_range = iwl_dump_ini_mon_dram_iter,
+ },
+ [IWL_FW_INI_REGION_TXF] = {
+ .get_num_of_ranges = iwl_dump_ini_txf_ranges,
+ .get_size = iwl_dump_ini_txf_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_txf_iter,
+ },
+ [IWL_FW_INI_REGION_RXF] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_rxf_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_rxf_iter,
+ },
+ [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_err_table_get_size,
+ .fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
+ .fill_range = iwl_dump_ini_err_table_iter,
+ },
+ [IWL_FW_INI_REGION_RSP_OR_NOTIF] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_fw_pkt_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_fw_pkt_iter,
+ },
+ [IWL_FW_INI_REGION_DEVICE_MEMORY] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_dev_mem_iter,
+ },
+ [IWL_FW_INI_REGION_PERIPHERY_MAC] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_prph_mac_iter,
+ },
+ [IWL_FW_INI_REGION_PERIPHERY_PHY] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_prph_phy_iter,
+ },
+ [IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
+ [IWL_FW_INI_REGION_PAGING] = {
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_num_of_ranges = iwl_dump_ini_paging_ranges,
+ .get_size = iwl_dump_ini_paging_get_size,
+ .fill_range = iwl_dump_ini_paging_iter,
+ },
+ [IWL_FW_INI_REGION_CSR] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_csr_iter,
+ },
+ [IWL_FW_INI_REGION_DRAM_IMR] = {
+ .get_num_of_ranges = iwl_dump_ini_imr_ranges,
+ .get_size = iwl_dump_ini_imr_get_size,
+ .fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+ .fill_range = iwl_dump_ini_imr_iter,
+ },
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_config_iter,
+ },
+ [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_special_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header,
+ .fill_range = iwl_dump_ini_special_mem_iter,
+ },
+ [IWL_FW_INI_REGION_DBGI_SRAM] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mon_dbgi_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
+ .fill_range = iwl_dump_ini_dbgi_sram_iter,
+ },
+};
+
+static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list)
+{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
+ int i;
+ u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
+ ~(fwrt->trans->dbg.unsupported_region_msk);
+
+ BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
+ BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
+ ARRAY_SIZE(fwrt->trans->dbg.active_regions));
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
+ u32 reg_type;
+ struct iwl_fw_ini_region_tlv *reg;
+
+ if (!(BIT_ULL(i) & regions_mask))
+ continue;
+
+ reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ if (!reg_data.reg_tlv) {
+ IWL_WARN(fwrt,
+ "WRT: Unassigned region id %d, skipping\n", i);
+ continue;
+ }
+
+ reg = (void *)reg_data.reg_tlv->data;
+ reg_type = reg->type;
+ if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
+ continue;
+
+ if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY &&
+ tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) {
+ IWL_WARN(fwrt,
+ "WRT: trying to collect phy prph at time point: %d, skipping\n",
+ tp_id);
+ continue;
+ }
+ /*
+ * DRAM_IMR can be collected only at the FW/HW error time points,
+ * when the fw is not alive. In addition, it must be collected
+ * last, as it overwrites SRAM that may still hold debug data
+ * that also needs to be collected.
+ */
+ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
+ if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ else
+ IWL_INFO(fwrt,
+ "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
+ tp_id);
+ /* continue to next region */
+ continue;
+ }
+
+ size += iwl_dump_ini_mem(fwrt, list, &reg_data,
+ &iwl_dump_ini_region_ops[reg_type]);
+ }
+
+ /* collect the DRAM_IMR region last */
+ if (imr_reg_data.reg_tlv)
+ size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+ &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
+
+ if (size)
+ size += iwl_dump_ini_info(fwrt, trigger, list);
+
+ return size;
+}
+
+static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *trig)
+{
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
+ u32 usec = le32_to_cpu(trig->ignore_consec);
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
+ tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
+ tp_id >= IWL_FW_INI_TIME_POINT_NUM ||
+ iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec))
+ return false;
+
+ return true;
+}
+
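+/*
+ * Build the ini dump as a list of entries: the file header entry is
+ * allocated first but only added to the head of the list after the trigger
+ * produced data, so no dump is emitted when nothing was collected.
+ */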
+static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list)
+{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ struct iwl_fw_ini_dump_entry *entry;
+ struct iwl_fw_ini_dump_file_hdr *hdr;
+ u32 size;
+
+ if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) ||
+ !le64_to_cpu(trigger->regions_mask))
+ return 0;
+
+ entry = vzalloc(sizeof(*entry) + sizeof(*hdr));
+ if (!entry)
+ return 0;
+
+ entry->size = sizeof(*hdr);
+
+ size = iwl_dump_ini_trigger(fwrt, dump_data, list);
+ if (!size) {
+ vfree(entry);
+ return 0;
+ }
+
+ hdr = (void *)entry->data;
+ hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER);
+ hdr->file_len = cpu_to_le32(size + entry->size);
+
+ list_add(&entry->list, list);
+
+ return le32_to_cpu(hdr->file_len);
+}
+
+static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc)
+{
+ if (desc && desc != &iwl_dump_desc_assert)
+ kfree(desc);
+
+ fwrt->dump.lmac_err_id[0] = 0;
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ fwrt->dump.lmac_err_id[1] = 0;
+ fwrt->dump.umac_err_id = 0;
+}
+
+static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
+{
+ struct iwl_fw_dump_ptrs fw_error_dump = {};
+ struct iwl_fw_error_dump_file *dump_file;
+ struct scatterlist *sg_dump_data;
+ u32 file_len;
+ u32 dump_mask = fwrt->fw->dbg.dump_mask;
+
+ dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
+ if (!dump_file)
+ return;
+
+ if (dump_data->monitor_only)
+ dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
+
+ fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask,
+ fwrt->sanitize_ops,
+ fwrt->sanitize_ctx);
+ file_len = le32_to_cpu(dump_file->file_len);
+ fw_error_dump.fwrt_len = file_len;
+
+ if (fw_error_dump.trans_ptr) {
+ file_len += fw_error_dump.trans_ptr->len;
+ dump_file->file_len = cpu_to_le32(file_len);
+ }
+
+ sg_dump_data = alloc_sgtable(file_len);
+ if (sg_dump_data) {
+ sg_pcopy_from_buffer(sg_dump_data,
+ sg_nents(sg_dump_data),
+ fw_error_dump.fwrt_ptr,
+ fw_error_dump.fwrt_len, 0);
+ if (fw_error_dump.trans_ptr)
+ sg_pcopy_from_buffer(sg_dump_data,
+ sg_nents(sg_dump_data),
+ fw_error_dump.trans_ptr->data,
+ fw_error_dump.trans_ptr->len,
+ fw_error_dump.fwrt_len);
+ dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
+ GFP_KERNEL);
+ }
+ vfree(fw_error_dump.fwrt_ptr);
+ vfree(fw_error_dump.trans_ptr);
+}
+
+static void iwl_dump_ini_list_free(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct iwl_fw_ini_dump_entry *entry =
+ list_entry(list->next, typeof(*entry), list);
+
+ list_del(&entry->list);
+ vfree(entry);
+ }
+}
+
+static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
+{
+ dump_data->trig = NULL;
+ kfree(dump_data->fw_pkt);
+ dump_data->fw_pkt = NULL;
+}
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data)
+{
+ LIST_HEAD(dump_list);
+ struct scatterlist *sg_dump_data;
+ u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
+
+ if (!file_len)
+ return;
+
+ sg_dump_data = alloc_sgtable(file_len);
+ if (sg_dump_data) {
+ struct iwl_fw_ini_dump_entry *entry;
+ int sg_entries = sg_nents(sg_dump_data);
+ u32 offs = 0;
+
+ list_for_each_entry(entry, &dump_list, list) {
+ sg_pcopy_from_buffer(sg_dump_data, sg_entries,
+ entry->data, entry->size, offs);
+ offs += entry->size;
+ }
+ dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
+ GFP_KERNEL);
+ }
+ iwl_dump_ini_list_free(&dump_list);
+}
+
+const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
+ .trig_desc = {
+ .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
+ },
+};
+IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc,
+ bool monitor_only,
+ unsigned int delay)
+{
+ struct iwl_fwrt_wk_data *wk_data;
+ unsigned long idx;
+
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_free_dump_desc(fwrt, desc);
+ return 0;
+ }
+
+ /*
+ * Check there is an available worker.
+ * ffz return value is undefined if no zero exists,
+ * so check against ~0UL first.
+ */
+ if (fwrt->dump.active_wks == ~0UL)
+ return -EBUSY;
+
+ idx = ffz(fwrt->dump.active_wks);
+
+ if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
+ test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
+ return -EBUSY;
+
+ wk_data = &fwrt->dump.wks[idx];
+
+ if (WARN_ON(wk_data->dump_data.desc))
+ iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc);
+
+ wk_data->dump_data.desc = desc;
+ wk_data->dump_data.monitor_only = monitor_only;
+
+ IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
+ le32_to_cpu(desc->trig_desc.type));
+
+ schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
+
+int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig_type)
+{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
+ return -EIO;
+
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT &&
+ trig_type != FW_DBG_TRIGGER_DRIVER)
+ return -EIO;
+
+ iwl_dbg_tlv_time_point(fwrt,
+ IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
+ NULL);
+ } else {
+ struct iwl_fw_dump_desc *iwl_dump_error_desc;
+ int ret;
+
+ iwl_dump_error_desc =
+ kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
+
+ if (!iwl_dump_error_desc)
+ return -ENOMEM;
+
+ iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
+ iwl_dump_error_desc->len = 0;
+
+ ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
+ false, 0);
+ if (ret) {
+ kfree(iwl_dump_error_desc);
+ return ret;
+ }
+ }
+
+ iwl_trans_sync_nmi(fwrt->trans);
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
+
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+ struct iwl_fw_dump_desc *desc;
+ unsigned int delay = 0;
+ bool monitor_only = false;
+
+ if (trigger) {
+ u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
+
+ if (!le16_to_cpu(trigger->occurrences))
+ return 0;
+
+ if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+ IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
+ trig);
+ iwl_force_nmi(fwrt->trans);
+ return 0;
+ }
+
+ trigger->occurrences = cpu_to_le16(occurrences);
+ monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
+
+ /* convert msec to usec */
+ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
+ }
+
+ desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->len = len;
+ desc->trig_desc.type = cpu_to_le32(trig);
+ memcpy(desc->trig_desc.data, str, len);
+
+ return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
+
+int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_trigger_tlv *trigger,
+ const char *fmt, ...)
+{
+ int ret, len = 0;
+ char buf[64];
+
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ return 0;
+
+ if (fmt) {
+ va_list ap;
+
+ buf[sizeof(buf) - 1] = '\0';
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ /* check for truncation */
+ if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
+ buf[sizeof(buf) - 1] = '\0';
+
+ len = strlen(buf) + 1;
+ }
+
+ ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+ trigger);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
+
+int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
+{
+ u8 *ptr;
+ int ret;
+ int i;
+
+ if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
+ "Invalid configuration %d\n", conf_id))
+ return -EINVAL;
+
+ /* EARLY START - firmware's configuration is hard coded */
+ if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
+ !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
+ conf_id == FW_DBG_START_FROM_ALIVE)
+ return 0;
+
+ if (!fwrt->fw->dbg.conf_tlv[conf_id])
+ return -EINVAL;
+
+ if (fwrt->dump.conf != FW_DBG_INVALID)
+ IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n",
+ fwrt->dump.conf);
+
+ /* Send all HCMDs for configuring the FW debug */
+ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
+ for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
+ struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
+ struct iwl_host_cmd hcmd = {
+ .id = cmd->id,
+ .len = { le16_to_cpu(cmd->len), },
+ .data = { cmd->data, },
+ };
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ return ret;
+
+ ptr += sizeof(*cmd);
+ ptr += le16_to_cpu(cmd->len);
+ }
+
+ fwrt->dump.conf = conf_id;
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
+
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data)
+{
+ struct iwl_dbg_dump_complete_cmd hcmd_data;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+ .data[0] = &hcmd_data,
+ .len[0] = sizeof(hcmd_data),
+ };
+
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+ hcmd_data.tp = cpu_to_le32(timepoint);
+ hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+}
+
+/* this function assumes dump_start was called beforehand and dump_end will be
+ * called afterwards
+ */
+static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+{
+ struct iwl_fw_dbg_params params = {0};
+ struct iwl_fwrt_dump_data *dump_data =
+ &fwrt->dump.wks[wk_idx].dump_data;
+ u32 policy;
+ u32 time_point;
+
+ if (!test_bit(wk_idx, &fwrt->dump.active_wks))
+ return;
+
+ if (!dump_data->trig) {
+ IWL_ERR(fwrt, "dump trigger data is not set\n");
+ goto out;
+ }
+
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
+ goto out;
+ }
+
+ /* there's no point in fw dump if the bus is dead */
+ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+ goto out;
+ }
+
+ iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);
+
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ else
+ iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
+
+ iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+
+ policy = le32_to_cpu(dump_data->trig->apply_policy);
+ time_point = le32_to_cpu(dump_data->trig->time_point);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
+ if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
+ iwl_force_nmi(fwrt->trans);
+
+out:
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_error_dump_data_free(dump_data);
+ } else {
+ iwl_fw_free_dump_desc(fwrt, dump_data->desc);
+ dump_data->desc = NULL;
+ }
+
+ clear_bit(wk_idx, &fwrt->dump.active_wks);
+}
+
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ bool sync)
+{
+ struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
+ u32 occur, delay;
+ unsigned long idx;
+
+ if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
+ IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
+ tp_id);
+ return -EINVAL;
+ }
+
+ delay = le32_to_cpu(trig->dump_delay);
+ occur = le32_to_cpu(trig->occurrences);
+ if (!occur)
+ return 0;
+
+ trig->occurrences = cpu_to_le32(--occur);
+
+ /* Check there is an available worker.
+ * ffz return value is undefined if no zero exists,
+ * so check against ~0UL first.
+ */
+ if (fwrt->dump.active_wks == ~0UL)
+ return -EBUSY;
+
+ idx = ffz(fwrt->dump.active_wks);
+
+ if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
+ test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
+ return -EBUSY;
+
+ fwrt->dump.wks[idx].dump_data = *dump_data;
+
+ if (sync)
+ delay = 0;
+
+ IWL_WARN(fwrt,
+ "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
+ tp_id, (u32)(delay / USEC_PER_MSEC));
+
+ if (sync)
+ iwl_fw_dbg_collect_sync(fwrt, idx);
+ else
+ schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+
+ return 0;
+}
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+ struct iwl_fwrt_wk_data *wks =
+ container_of(work, typeof(*wks), wk.work);
+ struct iwl_fw_runtime *fwrt =
+ container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);
+
+ /* assumes the op mode mutex is locked in dump_start since
+ * iwl_fw_dbg_collect_sync can't run in parallel
+ */
+ if (fwrt->ops && fwrt->ops->dump_start)
+ fwrt->ops->dump_start(fwrt->ops_ctx);
+
+ iwl_fw_dbg_collect_sync(fwrt, wks->idx);
+
+ if (fwrt->ops && fwrt->ops->dump_end)
+ fwrt->ops->dump_end(fwrt->ops_ctx);
+}
+
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
+{
+ const struct iwl_cfg *cfg = fwrt->trans->cfg;
+
+ if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
+ return;
+
+ if (!fwrt->dump.d3_debug_data) {
+ fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
+ GFP_KERNEL);
+ if (!fwrt->dump.d3_debug_data) {
+ IWL_ERR(fwrt,
+ "failed to allocate memory for D3 debug data\n");
+ return;
+ }
+ }
+
+ /* if the buffer holds previous debug data it is overwritten */
+ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
+ fwrt->dump.d3_debug_data,
+ cfg->d3_debug_data_length);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
+ cfg->d3_debug_data_base_addr,
+ fwrt->dump.d3_debug_data,
+ cfg->d3_debug_data_length);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
+
+void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
+{
+ int i;
+
+ iwl_dbg_tlv_del_timers(fwrt->trans);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
+ iwl_fw_dbg_collect_sync(fwrt, i);
+
+ iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
+
+static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
+{
+ struct iwl_dbg_suspend_resume_cmd cmd = {
+ .operation = suspend ?
+ cpu_to_le32(DBGC_SUSPEND_CMD) :
+ cpu_to_le32(DBGC_RESUME_CMD),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+
+ return iwl_trans_send_cmd(trans, &hcmd);
+}
+
+static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
+ struct iwl_fw_dbg_params *params)
+{
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ return;
+ }
+
+ if (params) {
+ params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE);
+ params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL);
+ }
+
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
+ /* wait for the DBGC to finish writing the internal buffer to DRAM to
+ * avoid halting the HW while writing
+ */
+ usleep_range(700, 1000);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
+}
+
+static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
+ struct iwl_fw_dbg_params *params)
+{
+ if (!params)
+ return -EIO;
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
+ } else {
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
+ }
+
+ return 0;
+}
+
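+/*
+ * Stop or restart DBGC recording around a dump: if the firmware supports
+ * the DBGC suspend/resume host command it is used; otherwise the DBGC (or
+ * 7000-family monitor) registers are written directly, saving/restoring
+ * their values through @params.
+ */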
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_params *params,
+ bool stop)
+{
+ int ret __maybe_unused = 0;
+
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
+ ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop);
+ else if (stop)
+ iwl_fw_dbg_stop_recording(fwrt->trans, params);
+ else
+ ret = iwl_fw_dbg_restart_recording(fwrt->trans, params);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (!ret) {
+ if (stop)
+ fwrt->trans->dbg.rec_on = false;
+ else
+ iwl_fw_set_dbg_rec_on(fwrt);
+ }
+#endif
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
new file mode 100644
index 000000000..be7806407
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_dbg_h__
+#define __iwl_fw_dbg_h__
+#include <linux/workqueue.h>
+#include <net/cfg80211.h>
+#include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "file.h"
+#include "error-dump.h"
+#include "api/commands.h"
+#include "api/dbg-tlv.h"
+#include "api/alive.h"
+
+/**
+ * struct iwl_fw_dump_desc - describes the dump
+ * @len: length of trig_desc->data
+ * @trig_desc: the description of the dump
+ */
+struct iwl_fw_dump_desc {
+ size_t len;
+ /* must be last */
+ struct iwl_fw_error_dump_trigger_desc trig_desc;
+};
+
+/**
+ * struct iwl_fw_dbg_params - register values to restore
+ * @in_sample: DBGC_IN_SAMPLE value
+ * @out_ctrl: DBGC_OUT_CTRL value
+ */
+struct iwl_fw_dbg_params {
+ u32 in_sample;
+ u32 out_ctrl;
+};
+
+extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc,
+ bool monitor_only, unsigned int delay);
+int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig_type);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ bool sync);
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig, const char *str,
+ size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
+int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_trigger_tlv *trigger,
+ const char *fmt, ...) __printf(3, 4);
+int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
+
+#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
+ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
+ unlikely(__dbg_trigger); \
+})
+
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
+{
+ return fw->dbg.trigger_tlv[id];
+}
+
+#define iwl_fw_dbg_get_trigger(fw, id) ({ \
+ BUILD_BUG_ON(!__builtin_constant_p(id)); \
+ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
+ _iwl_fw_dbg_get_trigger((fw), (id)); \
+})
+
+static inline bool
+iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
+ struct wireless_dev *wdev)
+{
+ u32 trig_vif = le32_to_cpu(trig->vif_type);
+
+ return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+ wdev->iftype == trig_vif;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_trigger_tlv *trig)
+{
+ return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+ (fwrt->dump.conf == FW_DBG_INVALID ||
+ (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids))));
+}
+
+static inline bool
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec)
+{
+ unsigned long wind_jiff = usecs_to_jiffies(dis_usec);
+
+ /* If this is the first event checked, jump to update start ts */
+ if (fwrt->dump.non_collect_ts_start[id] &&
+ (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff,
+ jiffies)))
+ return true;
+
+ fwrt->dump.non_collect_ts_start[id] = jiffies;
+ return false;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
+ struct wireless_dev *wdev,
+ struct iwl_fw_dbg_trigger_tlv *trig)
+{
+ u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC;
+
+ if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
+ return false;
+
+ if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) {
+ IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
+ trig->id);
+ return false;
+ }
+
+ return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
+}
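+
+/*
+ * Worked example of the no-collect window above (illustrative values): a
+ * trigger with trig_dis_ms = 1000 gives dis_usec = 1,000,000, so wind_jiff
+ * is one second worth of jiffies and a second occurrence of the same
+ * trigger id within that second is suppressed (the helper returns true).
+ */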
+
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct wireless_dev *wdev,
+ const enum iwl_fw_dbg_trigger id)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+
+ if (iwl_trans_dbg_ini_valid(fwrt->trans))
+ return NULL;
+
+ if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
+ return NULL;
+
+ trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
+
+ if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
+ return NULL;
+
+ return trig;
+}
+
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \
+ BUILD_BUG_ON(!__builtin_constant_p(id)); \
+ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
+ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
+})
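+
+/*
+ * Illustrative usage sketch (mirrors the intended pairing of the helpers
+ * above; FW_DBG_TRIGGER_MLME is just an example id, and "wdev"/"event"
+ * stand for the caller's own context):
+ *
+ *	struct iwl_fw_dbg_trigger_tlv *trig;
+ *
+ *	trig = iwl_fw_dbg_trigger_on(fwrt, wdev, FW_DBG_TRIGGER_MLME);
+ *	if (trig)
+ *		iwl_fw_dbg_collect_trig(fwrt, trig, "MLME event %d", event);
+ */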
+
+static inline void
+_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
+ struct wireless_dev *wdev,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+ if (!trigger)
+ return;
+
+ if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger))
+ return;
+
+ iwl_fw_dbg_collect_trig(fwrt, trigger, NULL);
+}
+
+#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \
+ _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \
+ iwl_fw_dbg_get_trigger((fwrt)->fw,\
+ (trig)))
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_params *params,
+ bool stop);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->cur_fw_img == IWL_UCODE_REGULAR &&
+ (fwrt->fw->dbg.dest_tlv ||
+ fwrt->trans->dbg.ini_dest != IWL_FW_INI_LOCATION_INVALID))
+ fwrt->trans->dbg.rec_on = true;
+}
+#endif
+
+static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
+{
+ fwrt->dump.conf = FW_DBG_INVALID;
+}
+
+void iwl_fw_error_dump_wk(struct work_struct *work);
+
+static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
+{
+ return (fwrt->fw->dbg.dump_mask & BIT(type));
+}
+
+static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
+{
+ return fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
+ fwrt->trans->cfg->d3_debug_data_length && fwrt->ops &&
+ fwrt->ops->d3_debug_enable &&
+ fwrt->ops->d3_debug_enable(fwrt->ops_ctx) &&
+ iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+}
+
+static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
+{
+ return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->trans_cfg->gen2 &&
+ fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
+ fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+ fwrt->fw_paging_db[0].fw_paging_block;
+}
+
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
+{
+ int i;
+
+ iwl_dbg_tlv_del_timers(fwrt->trans);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
+ flush_delayed_work(&fwrt->dump.wks[i].wk);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ fwrt->timestamp.delay = 0;
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ if (!fwrt->timestamp.delay)
+ return;
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+#else
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
+ u32 delay) {}
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans,
+ u32 lmac_error_event_table)
+{
+ if (!(trans->dbg.error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_LMAC1) ||
+ WARN_ON(trans->dbg.lmac_error_event_table[0] !=
+ lmac_error_event_table))
+ trans->dbg.lmac_error_event_table[0] = lmac_error_event_table;
+}
+
+static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
+ u32 umac_error_event_table)
+{
+ if (!(trans->dbg.error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_UMAC) ||
+ WARN_ON(trans->dbg.umac_error_event_table !=
+ umac_error_event_table))
+ trans->dbg.umac_error_event_table = umac_error_event_table;
+}
+
+static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
+{
+ enum iwl_fw_ini_time_point tp_id;
+
+ if (!iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ return;
+ }
+
+ if (fwrt->trans->dbg.hw_error) {
+ tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
+ fwrt->trans->dbg.hw_error = false;
+ } else {
+ tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
+ }
+
+ _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
+}
+
+void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
+ struct iwl_lmac_alive *lmac,
+ struct iwl_umac_alive *umac)
+{
+ if (lmac) {
+ fwrt->dump.fw_ver.type = lmac->ver_type;
+ fwrt->dump.fw_ver.subtype = lmac->ver_subtype;
+ fwrt->dump.fw_ver.lmac_major = le32_to_cpu(lmac->ucode_major);
+ fwrt->dump.fw_ver.lmac_minor = le32_to_cpu(lmac->ucode_minor);
+ }
+
+ if (umac) {
+ fwrt->dump.fw_ver.umac_major = le32_to_cpu(umac->umac_major);
+ fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
+ }
+}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data);
+#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
new file mode 100644
index 000000000..607e07ed2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "api/commands.h"
+#include "debugfs.h"
+#include "dbg.h"
+#include <linux/seq_file.h>
+
+#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+struct dbgfs_##name##_data { \
+ argtype *arg; \
+ bool read_done; \
+ ssize_t rlen; \
+ char rbuf[buflen]; \
+}; \
+static int _iwl_dbgfs_##name##_open(struct inode *inode, \
+ struct file *file) \
+{ \
+ struct dbgfs_##name##_data *data; \
+ \
+ data = kzalloc(sizeof(*data), GFP_KERNEL); \
+ if (!data) \
+ return -ENOMEM; \
+ \
+ data->read_done = false; \
+ data->arg = inode->i_private; \
+ file->private_data = data; \
+ \
+ return 0; \
+}
+
+#define FWRT_DEBUGFS_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = iwl_dbgfs_##name##_read(data->arg, \
+ sizeof(data->rbuf),\
+ data->rbuf); \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->rbuf, data->rlen); \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = \
+ ((struct dbgfs_##name##_data *)file->private_data)->arg;\
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size); \
+}
+
+#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+}
+
+#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+}
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ debugfs_create_file(alias, mode, parent, fwrt, \
+ &iwl_dbgfs_##name##_ops); \
+ } while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
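+
+/*
+ * Usage pattern for the wrappers above (sketch; "foo" is a placeholder
+ * name): define iwl_dbgfs_foo_read() and/or iwl_dbgfs_foo_write() taking a
+ * struct iwl_fw_runtime pointer, instantiate the file operations with one
+ * of the FWRT_DEBUGFS_*_FILE_OPS(foo, bufsz) macros, and register the file
+ * from iwl_fwrt_dbgfs_register() with FWRT_DEBUGFS_ADD_FILE(foo, parent,
+ * mode). The handlers below (timestamp_marker, send_hcmd, ...) follow this
+ * pattern.
+ */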
+
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_mvm_marker marker = {
+ .dw_len = sizeof(struct iwl_mvm_marker) / 4,
+ .marker_id = MARKER_ID_SYNC_CLOCK,
+
+ /* the real timestamp is taken from the ftrace clock
+ * this is for finding the match between fw and kernel logs
+ */
+ .timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+ };
+
+ struct iwl_host_cmd hcmd = {
+ .id = MARKER_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &marker,
+ .len[0] = sizeof(marker),
+ };
+
+ return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ struct iwl_dbg_host_event_cfg_cmd event_cfg;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
+ .flags = CMD_ASYNC,
+ .data[0] = &event_cfg,
+ .len[0] = sizeof(event_cfg),
+ };
+ u32 enabled_severities;
+ int ret = kstrtou32(buf, 10, &enabled_severities);
+
+ if (ret < 0)
+ return ret;
+
+ event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ IWL_INFO(fwrt,
+ "sent host event cfg with enabled_severities: %u, ret: %d\n",
+ enabled_severities, ret);
+
+ return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(enabled_severities, 16);
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+ int ret;
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+ unsigned long delay = fwrt->timestamp.delay;
+
+ ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+ if (!ret && delay)
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(delay));
+ else
+ IWL_INFO(fwrt,
+ "stopping timestamp_marker, ret: %d, delay: %u\n",
+ ret, jiffies_to_msecs(delay) / 1000);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
+{
+ IWL_INFO(fwrt,
+ "starting timestamp_marker trigger with delay: %us\n",
+ delay);
+
+ iwl_fw_cancel_timestamp(fwrt);
+
+ fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count)
+{
+ int ret;
+ u32 delay;
+
+ ret = kstrtou32(buf, 10, &delay);
+ if (ret < 0)
+ return ret;
+
+ iwl_fw_trigger_timestamp(fwrt, delay);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
+
+ return scnprintf(buf, size, "%d\n", delay_secs);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
+
+struct hcmd_write_data {
+ __be32 cmd_id;
+ __be32 flags;
+ __be16 length;
+ u8 data[];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ size_t count)
+{
+ size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+ size_t data_size = (count - 1) / 2;
+ int ret;
+ struct hcmd_write_data *data;
+ struct iwl_host_cmd hcmd = {
+ .len = { 0, },
+ .data = { NULL, },
+ };
+
+ if (fwrt->ops && fwrt->ops->fw_running &&
+ !fwrt->ops->fw_running(fwrt->ops_ctx))
+ return -EIO;
+
+ if (count < header_size + 1 || count > 1024 * 4)
+ return -EINVAL;
+
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = hex2bin((u8 *)data, buf, data_size);
+ if (ret)
+ goto out;
+
+ hcmd.id = be32_to_cpu(data->cmd_id);
+ hcmd.flags = be32_to_cpu(data->flags);
+ hcmd.len[0] = be16_to_cpu(data->length);
+ hcmd.data[0] = data->data;
+
+ if (count != header_size + hcmd.len[0] * 2 + 1) {
+ IWL_ERR(fwrt,
+ "host command data size does not match header length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fwrt->ops && fwrt->ops->send_hcmd)
+ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+ else
+ ret = -EPERM;
+
+ if (ret < 0)
+ goto out;
+
+ if (hcmd.flags & CMD_WANT_SKB)
+ iwl_free_resp(&hcmd);
+out:
+ kfree(data);
+ return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
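+
+/*
+ * Input format accepted by the send_hcmd file, as implied by struct
+ * hcmd_write_data and the length checks above: an ASCII hex string made of
+ * 8 characters of cmd_id, 8 characters of flags and 4 characters of payload
+ * length (big endian), followed by two hex characters per payload byte and
+ * one trailing character (typically the newline from echo). A zero-length
+ * command is therefore a 20-character header plus that trailing character.
+ */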
+
+static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ return scnprintf(buf, size, "0x%08x\n",
+ fwrt->trans->dbg.domains_bitmap);
+}
+
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
+
+struct iwl_dbgfs_fw_info_priv {
+ struct iwl_fw_runtime *fwrt;
+};
+
+struct iwl_dbgfs_fw_info_state {
+ loff_t pos;
+};
+
+static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ struct iwl_dbgfs_fw_info_state *state = v;
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+
+ *pos = ++state->pos;
+ if (*pos >= fw->ucode_capa.n_cmd_versions) {
+ kfree(state);
+ return NULL;
+ }
+
+ return state;
+}
+
+static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq,
+ void *v)
+{
+ kfree(v);
+}
+
+static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+ struct iwl_dbgfs_fw_info_state *state;
+
+ if (*pos >= fw->ucode_capa.n_cmd_versions)
+ return NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pos = *pos;
+ return state;
+};
+
+static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
+{
+ struct iwl_dbgfs_fw_info_state *state = v;
+ struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+ const struct iwl_fw *fw = priv->fwrt->fw;
+ const struct iwl_fw_cmd_version *ver;
+ u32 cmd_id;
+
+ if (!state->pos)
+ seq_puts(seq, "fw_api_ver:\n");
+
+ ver = &fw->ucode_capa.cmd_versions[state->pos];
+
+ cmd_id = WIDE_ID(ver->group, ver->cmd);
+
+ seq_printf(seq, " 0x%04x:\n", cmd_id);
+ seq_printf(seq, " name: %s\n",
+ iwl_get_cmd_string(priv->fwrt->trans, cmd_id));
+ seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver);
+ seq_printf(seq, " notif_ver: %d\n", ver->notif_ver);
+ return 0;
+}
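+
+/*
+ * The fw_info file prints a single "fw_api_ver:" header followed by one
+ * block per known command version; shape only, the values below are
+ * illustrative:
+ *
+ *	fw_api_ver:
+ *	 0x0104:
+ *	  name: SOME_CMD
+ *	  cmd_ver: 2
+ *	  notif_ver: 1
+ */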
+
+static const struct seq_operations iwl_dbgfs_info_seq_ops = {
+ .start = iwl_dbgfs_fw_info_seq_start,
+ .next = iwl_dbgfs_fw_info_seq_next,
+ .stop = iwl_dbgfs_fw_info_seq_stop,
+ .show = iwl_dbgfs_fw_info_seq_show,
+};
+
+static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp)
+{
+ struct iwl_dbgfs_fw_info_priv *priv;
+
+ priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops,
+ sizeof(*priv));
+
+ if (!priv)
+ return -ENOMEM;
+
+ priv->fwrt = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations iwl_dbgfs_fw_info_ops = {
+ .owner = THIS_MODULE,
+ .open = iwl_dbgfs_fw_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir)
+{
+ INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+ FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0400);
+ FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
new file mode 100644
index 000000000..0248d40bc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "runtime.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir);
+
+#else
+static inline void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir) { }
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
new file mode 100644
index 000000000..b90f1e9ce
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "pnvm.h"
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 trm_hw_status0; /* TRM HW status */
+ u32 trm_hw_status1; /* TRM HW status */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 bcon_time; /* beacon timer */
+ u32 tsf_low; /* network timestamp function timer */
+ u32 tsf_hi; /* network timestamp function timer */
+ u32 gp1; /* GP1 timer register */
+ u32 gp2; /* GP2 timer register */
+ u32 fw_rev_type; /* firmware revision type */
+ u32 major; /* uCode version major */
+ u32 minor; /* uCode version minor */
+ u32 hw_ver; /* HW Silicon version */
+ u32 brd_ver; /* HW board version */
+ u32 log_pc; /* log program counter */
+ u32 frame_ptr; /* frame pointer */
+ u32 stack_ptr; /* stack pointer */
+ u32 hcmd; /* last host command header */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ u32 last_cmd_id; /* last HCMD id handled by the firmware */
+ u32 wait_event; /* wait event() caller address */
+ u32 l2p_control; /* L2pControlField */
+ u32 l2p_duration; /* L2pDurationField */
+ u32 l2p_mhvalid; /* L2pMhValidBits */
+ u32 l2p_addr_match; /* L2pAddrMatchStat */
+ u32 lmpm_pmg_sel; /* indicates which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicates the date and time of
+ * the compilation */
+ u32 flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 data3; /* error-specific data */
+ u32 umac_major;
+ u32 umac_minor;
+ u32 frame_pointer; /* core register 27*/
+ u32 stack_pointer; /* core register 28 */
+ u32 cmd_header; /* latest host cmd sent to UMAC */
+ u32 nic_isr_pref; /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_umac_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.umac_error_event_table;
+ char pnvm_name[MAX_PNVM_NAME];
+
+ if (!base &&
+ !(fwrt->trans->dbg.error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_UMAC))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (table.valid)
+ fwrt->dump.umac_err_id = table.error_id;
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+ fwrt->trans->status, table.valid);
+ }
+
+ if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
+ FW_SYSASSERT_PNVM_MISSING) {
+ iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
+ IWL_ERR(fwrt, "PNVM data is missing, please install %s\n",
+ pnvm_name);
+ }
+
+ IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
+ iwl_fw_lookup_assert_desc(table.error_id));
+ IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
+ IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
+ IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
+ IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_error_event_table table = {};
+ u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
+
+ if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
+ if (!base)
+ base = fwrt->fw->init_errlog_ptr;
+ } else {
+ if (!base)
+ base = fwrt->fw->inst_errlog_ptr;
+ }
+
+ if (base < 0x400000) {
+ IWL_ERR(fwrt,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (fwrt->cur_fw_img == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ /* check if there is a HW error */
+ val = iwl_trans_read_mem32(trans, base);
+ if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+ int err;
+
+ IWL_ERR(trans, "HW error, resetting before reading\n");
+
+ /* reset the device */
+ err = iwl_trans_sw_reset(trans, true);
+ if (err)
+ return;
+
+ err = iwl_finish_nic_init(trans);
+ if (err)
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (table.valid)
+ fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+ fwrt->trans->status, table.valid);
+ }
+
+ /* Do not change this output - scripts rely on it */
+
+ IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
+
+ IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
+ iwl_fw_lookup_assert_desc(table.error_id));
+ IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+ IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+ IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
+ IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
+ IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
+ IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
+ IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
+ IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
+ IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
+ IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
+ IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
+ IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
+ IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
+ IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
+ IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/*
+ * TCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_tcm_error_event_table {
+ u32 valid;
+ u32 error_id;
+ u32 blink2;
+ u32 ilink1;
+ u32 ilink2;
+ u32 data1, data2, data3;
+ u32 logpc;
+ u32 frame_pointer;
+ u32 stack_pointer;
+ u32 msgid;
+ u32 isr;
+ u32 hw_status[5];
+ u32 sw_status[1];
+ u32 reserved[4];
+} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_tcm_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
+ int i;
+ u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
+ IWL_ERROR_EVENT_TABLE_TCM1;
+
+ if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
+ IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+ IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
+ IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
+ IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
+ for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
+ IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
+ table.hw_status[i], i);
+ for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
+ IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
+ table.sw_status[i], i);
+}
+
+/*
+ * RCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_rcm_error_event_table {
+ u32 valid;
+ u32 error_id;
+ u32 blink2;
+ u32 ilink1;
+ u32 ilink2;
+ u32 data1, data2, data3;
+ u32 logpc;
+ u32 frame_pointer;
+ u32 stack_pointer;
+ u32 msgid;
+ u32 isr;
+ u32 frame_hw_status;
+ u32 mbx_lmac_to_rcm_req;
+ u32 mbx_rcm_to_lmac_req;
+ u32 mh_ctl;
+ u32 mh_addr1_lo;
+ u32 mh_info;
+ u32 mh_err;
+ u32 reserved[3];
+} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_rcm_error_event_table table = {};
+ u32 base = fwrt->trans->dbg.rcm_error_event_table[idx];
+ u32 flag = idx ? IWL_ERROR_EVENT_TABLE_RCM2 :
+ IWL_ERROR_EVENT_TABLE_RCM1;
+
+ if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
+ return;
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
+ IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+ IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
+ IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
+ IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
+ IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
+ IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
+ IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
+ IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
+ IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
+ IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
+ IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
+ IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
+ IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
+ IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
+ table.mbx_lmac_to_rcm_req);
+ IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
+ table.mbx_rcm_to_lmac_req);
+ IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
+ IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
+ IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
+ IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
+}
+
+static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ u32 error, data1;
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ error = UMAG_SB_CPU_2_STATUS;
+ data1 = UMAG_SB_CPU_1_STATUS;
+ } else if (fwrt->trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ error = SB_CPU_2_STATUS;
+ data1 = SB_CPU_1_STATUS;
+ } else {
+ return;
+ }
+
+ error = iwl_read_umac_prph(trans, error);
+
+ IWL_ERR(trans, "IML/ROM dump:\n");
+
+ if (error & 0xFFFF0000)
+ IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
+
+ IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
+ IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
+ iwl_read_umac_prph(trans, data1));
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+ iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
+}
+
+#define FSEQ_REG(x) { .addr = (x), .str = #x, }
+
+static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ int i;
+ struct {
+ u32 addr;
+ const char *str;
+ } fseq_regs[] = {
+ FSEQ_REG(FSEQ_ERROR_CODE),
+ FSEQ_REG(FSEQ_TOP_INIT_VERSION),
+ FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
+ FSEQ_REG(FSEQ_OTP_VERSION),
+ FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
+ FSEQ_REG(FSEQ_ALIVE_TOKEN),
+ FSEQ_REG(FSEQ_CNVI_ID),
+ FSEQ_REG(FSEQ_CNVR_ID),
+ FSEQ_REG(CNVI_AUX_MISC_CHIP),
+ FSEQ_REG(CNVR_AUX_MISC_CHIP),
+ FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
+ FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
+ };
+
+ if (!iwl_trans_grab_nic_access(trans))
+ return;
+
+ IWL_ERR(fwrt, "Fseq Registers:\n");
+
+ for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
+ IWL_ERR(fwrt, "0x%08X | %s\n",
+ iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
+ fseq_regs[i].str);
+
+ iwl_trans_release_nic_access(trans);
+}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
+{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+ IWL_ERR(fwrt,
+ "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+ return;
+ }
+
+ iwl_fwrt_dump_lmac_error_log(fwrt, 0);
+ if (fwrt->trans->dbg.lmac_error_event_table[1])
+ iwl_fwrt_dump_lmac_error_log(fwrt, 1);
+ iwl_fwrt_dump_umac_error_log(fwrt);
+ iwl_fwrt_dump_tcm_error_log(fwrt, 0);
+ iwl_fwrt_dump_rcm_error_log(fwrt, 0);
+ iwl_fwrt_dump_tcm_error_log(fwrt, 1);
+ iwl_fwrt_dump_rcm_error_log(fwrt, 1);
+ iwl_fwrt_dump_iml_error_log(fwrt);
+ iwl_fwrt_dump_fseq_regs(fwrt);
+
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
+
+ IWL_ERR(fwrt, "Function Scratch status:\n");
+ IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
new file mode 100644
index 000000000..2d481849a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2014-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+#include "fw/api/cmdhdr.h"
+
+#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
+#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ * &struct iwl_fw_error_dump_txcmd packets
+ * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
+ * info on the device / firmware.
+ * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
+ * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
+ * sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
+ * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
+ * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
+ * Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ * &struct iwl_fw_error_dump_rb
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
+ * paged to the DRAM.
+ * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
+ * for that reason is not in use in any other place in the Linux Wi-Fi
+ * stack.
+ * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
+ * which we get from the fw after ALIVE. The content is structured as
+ * &struct iwl_fw_error_dump_smem_cfg.
+ */
+enum iwl_fw_error_dump_type {
+ /* 0 is deprecated */
+ IWL_FW_ERROR_DUMP_CSR = 1,
+ IWL_FW_ERROR_DUMP_RXF = 2,
+ IWL_FW_ERROR_DUMP_TXCMD = 3,
+ IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
+ IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
+ IWL_FW_ERROR_DUMP_PRPH = 6,
+ IWL_FW_ERROR_DUMP_TXF = 7,
+ IWL_FW_ERROR_DUMP_FH_REGS = 8,
+ IWL_FW_ERROR_DUMP_MEM = 9,
+ IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+ IWL_FW_ERROR_DUMP_RB = 11,
+ IWL_FW_ERROR_DUMP_PAGING = 12,
+ IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+ IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
+ IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
+ IWL_FW_ERROR_DUMP_MEM_CFG = 16,
+ IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
+
+ IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: &enum iwl_fw_error_dump_type
+ * @len: the length starting from %data
+ * @data: the data itself
+ */
+struct iwl_fw_error_dump_data {
+ __le32 type;
+ __le32 len;
+ __u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of all the file starting from %barker
+ * @data: array of &struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+ __le32 barker;
+ __le32 file_len;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+ __le32 cmdlen;
+ __le32 caplen;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_fifo - RX/TX FIFO data
+ * @fifo_num: number of FIFO (starting from 0)
+ * @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
+ * @wr_ptr: position of write pointer
+ * @rd_ptr: position of read pointer
+ * @fence_ptr: position of fence pointer
+ * @fence_mode: the current mode of the fence (before locking) -
+ * 0=follow RD pointer ; 1 = freeze
+ * @data: all of the FIFO's data
+ */
+struct iwl_fw_error_dump_fifo {
+ __le32 fifo_num;
+ __le32 available_bytes;
+ __le32 wr_ptr;
+ __le32 rd_ptr;
+ __le32 fence_ptr;
+ __le32 fence_mode;
+ u8 data[];
+} __packed;
+
+enum iwl_fw_error_dump_family {
+ IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
+ IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
+};
+
+#define MAX_NUM_LMAC 2
+
+/**
+ * struct iwl_fw_error_dump_info - info on the device / firmware
+ * @hw_type: the type of the device
+ * @hw_step: the step of the device
+ * @fw_human_readable: human readable FW version
+ * @dev_human_readable: name of the device
+ * @bus_human_readable: name of the bus used
+ * @num_of_lmacs: the number of lmacs
+ * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest
+ * dump; if the dump collection was not initiated by an assert, the value is 0
+ * @umac_err_id: the umac error_id/rt_status that triggered the latest
+ * dump; if the dump collection was not initiated by an assert, the value is 0
+ */
+struct iwl_fw_error_dump_info {
+ __le32 hw_type;
+ __le32 hw_step;
+ u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
+ u8 dev_human_readable[64];
+ u8 bus_human_readable[8];
+ u8 num_of_lmacs;
+ __le32 umac_err_id;
+ __le32 lmac_err_id[MAX_NUM_LMAC];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_fw_mon - FW monitor data
+ * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
+ * @fw_mon_base_ptr: base pointer of the data
+ * @fw_mon_cycle_cnt: number of wraparounds
+ * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit
+ * so fw_mon_base_ptr holds the LSB 32 bits and fw_mon_base_high_ptr holds
+ * the MSB 32 bits
+ * @reserved: for future use
+ * @data: captured data
+ */
+struct iwl_fw_error_dump_fw_mon {
+ __le32 fw_mon_wr_ptr;
+ __le32 fw_mon_base_ptr;
+ __le32 fw_mon_cycle_cnt;
+ __le32 fw_mon_base_high_ptr;
+ __le32 reserved[2];
+ u8 data[];
+} __packed;
+
+#define TX_FIFO_INTERNAL_MAX_NUM 6
+#define TX_FIFO_MAX_NUM 15
+/**
+ * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration
+ * This must follow &struct iwl_fwrt_shared_mem_cfg.
+ * @num_lmacs: number of lmacs
+ * @num_txfifo_entries: number of tx fifos
+ * @lmac: sizes of lmacs txfifos and rxfifo1
+ * @rxfifo2_size: size of rxfifo2
+ * @internal_txfifo_addr: address of internal tx fifo
+ * @internal_txfifo_size: size of internal tx fifo
+ */
+struct iwl_fw_error_dump_smem_cfg {
+ __le32 num_lmacs;
+ __le32 num_txfifo_entries;
+ struct {
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo1_size;
+ } lmac[MAX_NUM_LMAC];
+ __le32 rxfifo2_size;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed;
+/**
+ * struct iwl_fw_error_dump_prph - periphery registers data
+ * @prph_start: address of the first register in this chunk
+ * @data: the content of the registers
+ */
+struct iwl_fw_error_dump_prph {
+ __le32 prph_start;
+ __le32 data[];
+};
+
+enum iwl_fw_error_dump_mem_type {
+ IWL_FW_ERROR_DUMP_MEM_SRAM,
+ IWL_FW_ERROR_DUMP_MEM_SMEM,
+ IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10,
+};
+
+/**
+ * struct iwl_fw_error_dump_mem - chunk of memory
+ * @type: &enum iwl_fw_error_dump_mem_type
+ * @offset: the offset from which the memory was read
+ * @data: the content of the memory
+ */
+struct iwl_fw_error_dump_mem {
+ __le32 type;
+ __le32 offset;
+ u8 data[];
+};
+
+/* Dump version, used by the dump parser to differentiate between
+ * different dump formats
+ */
+#define IWL_INI_DUMP_VER 1
+
+/* Use bit 31 as dump info type to avoid colliding with region types */
+#define IWL_INI_DUMP_INFO_TYPE BIT(31)
+
+/**
+ * struct iwl_fw_ini_error_dump_data - data for one type
+ * @type: &enum iwl_fw_ini_region_type
+ * @sub_type: sub type id
+ * @sub_type_ver: sub type version
+ * @reserved: not in use
+ * @len: the length starting from %data
+ * @data: the data itself
+ */
+struct iwl_fw_ini_error_dump_data {
+ u8 type;
+ u8 sub_type;
+ u8 sub_type_ver;
+ u8 reserved;
+ __le32 len;
+ __u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_dump_entry
+ * @list: list of dump entries
+ * @size: size of the data
+ * @data: entry data
+ */
+struct iwl_fw_ini_dump_entry {
+ struct list_head list;
+ u32 size;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_dump_file_hdr - header of the ini dump file
+ * @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER
+ * @file_len: the length of all the file including the header
+ */
+struct iwl_fw_ini_dump_file_hdr {
+ __le32 barker;
+ __le32 file_len;
+} __packed;
+
+/**
+ * struct iwl_fw_ini_fifo_hdr - fifo range header
+ * @fifo_num: the fifo number. In case of umac rx fifo, set BIT(31) to
+ * distinguish between lmac and umac rx fifos
+ * @num_of_registers: num of registers to dump, dword size each
+ */
+struct iwl_fw_ini_fifo_hdr {
+ __le32 fifo_num;
+ __le32 num_of_registers;
+} __packed;
+
+/**
+ * struct iwl_fw_ini_error_dump_range - range of memory
+ * @range_data_size: the size of this range, in bytes
+ * @internal_base_addr: base address of internal memory range
+ * @dram_base_addr: base address of dram monitor range
+ * @page_num: page number of memory range
+ * @fifo_hdr: fifo header of memory range
+ * @fw_pkt: FW packet header of memory range
+ * @data: the actual memory
+ */
+struct iwl_fw_ini_error_dump_range {
+ __le32 range_data_size;
+ union {
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
+ struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ struct iwl_cmd_header fw_pkt_hdr;
+ };
+ __le32 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_error_dump_header - ini region dump header
+ * @version: dump version
+ * @region_id: id of the region
+ * @num_of_ranges: number of ranges in this region
+ * @name_len: number of bytes allocated to the name string of this region
+ * @name: name of the region
+ */
+struct iwl_fw_ini_error_dump_header {
+ __le32 version;
+ __le32 region_id;
+ __le32 num_of_ranges;
+ __le32 name_len;
+ u8 name[IWL_FW_INI_MAX_NAME];
+};
+
+/**
+ * struct iwl_fw_ini_error_dump - ini region dump
+ * @header: the header of this region
+ * @data: data of memory ranges in this region,
+ * see &struct iwl_fw_ini_error_dump_range
+ */
+struct iwl_fw_ini_error_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ u8 data[];
+} __packed;
+
+/* This bit is used to differentiate between lmac and umac rxf */
+#define IWL_RXF_UMAC_BIT BIT(31)
+
+/**
+ * struct iwl_fw_ini_error_dump_register - ini register dump
+ * @addr: address of the register
+ * @data: data of the register
+ */
+struct iwl_fw_ini_error_dump_register {
+ __le32 addr;
+ __le32 data;
+} __packed;
+
+/**
+ * struct iwl_fw_ini_dump_cfg_name - configuration name
+ * @image_type: image type the configuration is related to
+ * @cfg_name_len: length of the configuration name
+ * @cfg_name: name of the configuration
+ */
+struct iwl_fw_ini_dump_cfg_name {
+ __le32 image_type;
+ __le32 cfg_name_len;
+ u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
+} __packed;
+
+/* AX210's HW type */
+#define IWL_AX210_HW_TYPE 0x42
+/* How many bits to roll when adding to the HW type of AX210 HW */
+#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
+
+/**
+ * struct iwl_fw_ini_dump_info - ini dump information
+ * @version: dump version
+ * @time_point: time point that caused the dump collection
+ * @trigger_reason: reason of the trigger
+ * @external_cfg_state: &enum iwl_ini_cfg_state
+ * @ver_type: FW version type
+ * @ver_subtype: FW version subtype
+ * @hw_step: HW step
+ * @hw_type: HW type
+ * @rf_id_flavor: HW RF id flavor
+ * @rf_id_dash: HW RF id dash
+ * @rf_id_step: HW RF id step
+ * @rf_id_type: HW RF id type
+ * @lmac_major: lmac major version
+ * @lmac_minor: lmac minor version
+ * @umac_major: umac major version
+ * @umac_minor: umac minor version
+ * @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location
+ * @regions_mask: bitmap mask of regions ids in the dump
+ * @build_tag_len: length of the build tag
+ * @build_tag: build tag string
+ * @num_of_cfg_names: number of configuration name structs
+ * @cfg_names: configuration names
+ */
+struct iwl_fw_ini_dump_info {
+ __le32 version;
+ __le32 time_point;
+ __le32 trigger_reason;
+ __le32 external_cfg_state;
+ __le32 ver_type;
+ __le32 ver_subtype;
+ __le32 hw_step;
+ __le32 hw_type;
+ __le32 rf_id_flavor;
+ __le32 rf_id_dash;
+ __le32 rf_id_step;
+ __le32 rf_id_type;
+ __le32 lmac_major;
+ __le32 lmac_minor;
+ __le32 umac_major;
+ __le32 umac_minor;
+ __le32 fw_mon_mode;
+ __le64 regions_mask;
+ __le32 build_tag_len;
+ u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
+ __le32 num_of_cfg_names;
+ struct iwl_fw_ini_dump_cfg_name cfg_names[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_err_table_dump - ini error table dump
+ * @header: header of the region
+ * @version: error table version
+ * @data: data of memory ranges in this region,
+ * see &struct iwl_fw_ini_error_dump_range
+ */
+struct iwl_fw_ini_err_table_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 version;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+ __le32 index;
+ __le32 rxq;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
+ * struct iwl_fw_ini_monitor_dump - ini monitor dump
+ * @header: header of the region
+ * @write_ptr: write pointer position in the buffer
+ * @cycle_cnt: cycles count
+ * @cur_frag: current fragment in use
+ * @data: data of memory ranges in this region,
+ * see &struct iwl_fw_ini_error_dump_range
+ */
+struct iwl_fw_ini_monitor_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 write_ptr;
+ __le32 cycle_cnt;
+ __le32 cur_frag;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_special_device_memory - special device memory
+ * @header: header of the region
+ * @type: type of special memory
+ * @version: struct special memory version
+ * @data: data of memory ranges in this region,
+ * see &struct iwl_fw_ini_error_dump_range
+ */
+struct iwl_fw_ini_special_device_memory {
+ struct iwl_fw_ini_error_dump_header header;
+ __le16 type;
+ __le16 version;
+ u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_paging - content of the UMAC's image page
+ * block on DRAM
+ * @index: the index of the page block
+ * @reserved: reserved
+ * @data: the content of the page block
+ */
+struct iwl_fw_error_dump_paging {
+ __le32 index;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
+ * iwl_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+ return (void *)(data->data + le32_to_cpu(data->len));
+}
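+
+/*
+ * Illustrative sketch (not a helper the driver uses): how an external
+ * parser might walk the records of a non-ini dump file using
+ * iwl_fw_error_next_data(). The function name is made up for the example.
+ */
+static inline void
+iwl_fw_error_dump_walk_example(struct iwl_fw_error_dump_file *file)
+{
+ struct iwl_fw_error_dump_data *data = (void *)file->data;
+ u8 *end = (u8 *)file + le32_to_cpu(file->file_len);
+
+ while ((u8 *)data + sizeof(*data) <= end) {
+ /* each record is: le32 type, le32 len, then len bytes of data */
+ data = iwl_fw_error_next_data(data);
+ }
+}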
+
+/**
+ * enum iwl_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ * This should not be defined as a trigger to the driver, but a value the
+ * driver should set to indicate that the trigger was initiated by the
+ * user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ * missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ * command response or a notification.
+ * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ * goes below a threshold.
+ * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ * detection.
+ * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time-event
+ * related events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ * the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if the alive flow
+ * times out
+ * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
+ * in the driver.
+ */
+enum iwl_fw_dbg_trigger {
+ FW_DBG_TRIGGER_INVALID = 0,
+ FW_DBG_TRIGGER_USER,
+ FW_DBG_TRIGGER_FW_ASSERT,
+ FW_DBG_TRIGGER_MISSED_BEACONS,
+ FW_DBG_TRIGGER_CHANNEL_SWITCH,
+ FW_DBG_TRIGGER_FW_NOTIF,
+ FW_DBG_TRIGGER_MLME,
+ FW_DBG_TRIGGER_STATS,
+ FW_DBG_TRIGGER_RSSI,
+ FW_DBG_TRIGGER_TXQ_TIMERS,
+ FW_DBG_TRIGGER_TIME_EVENT,
+ FW_DBG_TRIGGER_BA,
+ FW_DBG_TRIGGER_TX_LATENCY,
+ FW_DBG_TRIGGER_TDLS,
+ FW_DBG_TRIGGER_TX_STATUS,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT,
+ FW_DBG_TRIGGER_DRIVER,
+
+ /* must be last */
+ FW_DBG_TRIGGER_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
+ * @type: &enum iwl_fw_dbg_trigger
+ * @data: raw data about what happened
+ */
+struct iwl_fw_error_dump_trigger_desc {
+ __le32 type;
+ u8 data[];
+};
+
+#endif /* __fw_error_dump_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
new file mode 100644
index 000000000..a7817d952
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -0,0 +1,955 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2008-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_file_h__
+#define __iwl_fw_file_h__
+
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+
+/* v1/v2 uCode file layout */
+struct iwl_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ union {
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+ struct {
+ __le32 build; /* build number */
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v2;
+ } u;
+};
+
+#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWL_UCODE_TLV_CONST_BASE 0x100
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, that each specify
+ * some piece of data.
+ */
+
+enum iwl_ucode_tlv_type {
+ IWL_UCODE_TLV_INVALID = 0, /* unused */
+ IWL_UCODE_TLV_INST = 1,
+ IWL_UCODE_TLV_DATA = 2,
+ IWL_UCODE_TLV_INIT = 3,
+ IWL_UCODE_TLV_INIT_DATA = 4,
+ IWL_UCODE_TLV_BOOT = 5,
+ IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+ IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */
+ IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */
+ IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
+ IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
+ IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
+ IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
+ IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
+ IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
+ IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
+ IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
+ IWL_UCODE_TLV_WOWLAN_INST = 16,
+ IWL_UCODE_TLV_WOWLAN_DATA = 17,
+ IWL_UCODE_TLV_FLAGS = 18,
+ IWL_UCODE_TLV_SEC_RT = 19,
+ IWL_UCODE_TLV_SEC_INIT = 20,
+ IWL_UCODE_TLV_SEC_WOWLAN = 21,
+ IWL_UCODE_TLV_DEF_CALIB = 22,
+ IWL_UCODE_TLV_PHY_SKU = 23,
+ IWL_UCODE_TLV_SECURE_SEC_RT = 24,
+ IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
+ IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+ IWL_UCODE_TLV_NUM_OF_CPU = 27,
+ IWL_UCODE_TLV_CSCHEME = 28,
+ IWL_UCODE_TLV_API_CHANGES_SET = 29,
+ IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
+ IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWL_UCODE_TLV_PAGING = 32,
+ IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
+ /* 35 is unused */
+ IWL_UCODE_TLV_FW_VERSION = 36,
+ IWL_UCODE_TLV_FW_DBG_DEST = 38,
+ IWL_UCODE_TLV_FW_DBG_CONF = 39,
+ IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWL_UCODE_TLV_CMD_VERSIONS = 48,
+ IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
+ IWL_UCODE_TLV_FW_MEM_SEG = 51,
+ IWL_UCODE_TLV_IML = 52,
+ IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
+ IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
+ IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
+ IWL_UCODE_TLV_HW_TYPE = 58,
+ IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+ IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61,
+
+ IWL_UCODE_TLV_PNVM_VERSION = 62,
+ IWL_UCODE_TLV_PNVM_SKU = 64,
+
+ IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
+ IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
+
+ IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
+
+ IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
+ IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
+ IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
+ IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3,
+ IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4,
+ IWL_UCODE_TLV_TYPE_CONF_SET = IWL_UCODE_TLV_DEBUG_BASE + 5,
+ IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS,
+
+ /* TLVs 0x1000-0x2000 are for internal driver usage */
+ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
+};
+
+struct iwl_ucode_tlv {
+ __le32 type; /* see above */
+ __le32 length; /* not including type/length fields */
+ u8 data[];
+};
+
+#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
+#define FW_VER_HUMAN_READABLE_SZ 64
+
+struct iwl_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by the first four bytes being
+ * zero, since that is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ __le32 zero;
+ __le32 magic;
+ u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+ /* major/minor/API/serial or major in new format */
+ __le32 ver;
+ __le32 build;
+ __le64 ignore;
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ u8 data[];
+};
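As a minimal sketch of how the TLV area described above could be walked (illustrative only; the helper name is made up and the usual kernel headers are assumed):

/* Illustrative TLV walk; each TLV is padded to a 4-byte boundary. */
static int example_walk_ucode_tlvs(const u8 *data, size_t len)
{
        while (len >= sizeof(struct iwl_ucode_tlv)) {
                const struct iwl_ucode_tlv *tlv = (const void *)data;
                u32 tlv_len = le32_to_cpu(tlv->length);
                size_t step = sizeof(*tlv) + ALIGN(tlv_len, 4);

                if (len < step)
                        return -EINVAL; /* truncated file */

                /* dispatch on le32_to_cpu(tlv->type) and tlv->data here */

                data += step;
                len -= step;
        }

        return 0;
}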
+
+/*
+ * ucode TLVs
+ *
+ * used to extract the flags & capabilities extensions from the ucode binary files
+ */
+struct iwl_ucode_api {
+ __le32 api_index;
+ __le32 api_flags;
+} __packed;
+
+struct iwl_ucode_capa {
+ __le32 api_index;
+ __le32 api_capa;
+} __packed;
+
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of block list instead of 64 in scan
+ * offload profile config command.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ */
+enum iwl_ucode_tlv_flag {
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
+ IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+};
+
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
+ * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
+ * indicating low latency direction.
+ * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
+ * deprecated.
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8
+ * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8
+ * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS
+ * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of
+ * the REDUCE_TX_POWER_CMD.
+ * @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short
+ * version of the beacon notification.
+ * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of
+ * BEACON_FILTER_CONFIG_API_S_VER_4.
+ * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of
+ * REGULATORY_NVM_GET_INFO_RSP_API_S.
+ * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of
+ * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S.
+ * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of
+ * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of
+ * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
+ * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
+ * STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
+ * version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
+ */
+enum iwl_ucode_tlv_api {
+ /* API Set 0 */
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
+ IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
+ IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
+ /* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
+ IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33,
+ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
+ IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
+ IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36,
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42,
+ IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
+ IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46,
+ IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47,
+ IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
+ IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
+ IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
+ IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+
+
+#ifdef __CHECKER__
+ /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_API 128
+#else
+ NUM_IWL_UCODE_TLV_API
+#endif
+};
+
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
+ * is standalone or with a BSS station interface in the same binding.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement to
+ * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
+ * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting
+ * stabilization latency for SoCs.
+ * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
+ * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota-related functionality
+ * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
+ * @IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command
+ * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
+ * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA
+ * countdown offloading. Beacon notifications are not sent to the host.
+ * The fw also offloads TBTT alignment.
+ * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ * antenna the beacon should be transmitted
+ * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
+ * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
+ * command size (command version 4) that supports toggling ACK TX
+ * power reduction.
+ * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
+ * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
+ * capability.
+ * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured
+ * to report the CSI information with (certain) RX frames
+ * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
+ * initiator and responder
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
+ * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in
+ * reset flow
+ * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
+ * channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
+ * complete to FW.
+ *
+ * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
+ */
+enum iwl_ucode_tlv_capa {
+ /* set 0 */
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
+ IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
+ IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+
+ /* set 1 */
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
+ IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
+ IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46,
+ IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
+ IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
+ IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
+ IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
+ IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
+ IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
+ IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
+ IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59,
+ IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
+ IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
+ IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
+
+ /* set 2 */
+ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
+ IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
+ IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
+ IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
+ IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
+ IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
+ IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
+ IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
+ IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
+ IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
+ IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
+ IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90,
+ IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)92,
+ IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93,
+
+ /* set 3 */
+ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
+
+ /*
+ * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels
+ */
+ IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
+
+ IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
+
+#ifdef __CHECKER__
+ /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_CAPA 128
+#else
+ NUM_IWL_UCODE_TLV_CAPA
+#endif
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
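For illustration, a throwaway sketch decoding a made-up packed version word with the macros above (the value and helper are hypothetical):

static void example_decode_ucode_ver(void)
{
        u32 ver = 0x24010563; /* made-up value */

        pr_info("ucode %u.%u api %u serial %u\n",
                IWL_UCODE_MAJOR(ver), IWL_UCODE_MINOR(ver),
                IWL_UCODE_API(ver), IWL_UCODE_SERIAL(ver));
        /* prints: ucode 36.1 api 5 serial 99 */
}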
+
+/**
+ * struct iwl_tlv_calib_ctrl - Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+enum iwl_fw_phy_cfg {
+ FW_PHY_CFG_RADIO_TYPE_POS = 0,
+ FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
+ FW_PHY_CFG_RADIO_STEP_POS = 2,
+ FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
+ FW_PHY_CFG_RADIO_DASH_POS = 4,
+ FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
+ FW_PHY_CFG_TX_CHAIN_POS = 16,
+ FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
+ FW_PHY_CFG_RX_CHAIN_POS = 20,
+ FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+ FW_PHY_CFG_CHAIN_SAD_POS = 23,
+ FW_PHY_CFG_CHAIN_SAD_ENABLED = 0x1 << FW_PHY_CFG_CHAIN_SAD_POS,
+ FW_PHY_CFG_CHAIN_SAD_ANT_A = 0x2 << FW_PHY_CFG_CHAIN_SAD_POS,
+ FW_PHY_CFG_CHAIN_SAD_ANT_B = 0x4 << FW_PHY_CFG_CHAIN_SAD_POS,
+ FW_PHY_CFG_SHARED_CLK = BIT(31),
+};
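A small sketch, not taken from the driver, of extracting the TX/RX chain bitmaps from a phy_config word using the masks above (helper names are made up):

static inline u8 example_phy_cfg_tx_chains(u32 phy_config)
{
        return (phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS;
}

static inline u8 example_phy_cfg_rx_chains(u32 phy_config)
{
        return (phy_config & FW_PHY_CFG_RX_CHAIN) >> FW_PHY_CFG_RX_CHAIN_POS;
}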
+
+enum iwl_fw_dbg_reg_operator {
+ CSR_ASSIGN,
+ CSR_SETBIT,
+ CSR_CLEARBIT,
+
+ PRPH_ASSIGN,
+ PRPH_SETBIT,
+ PRPH_CLEARBIT,
+
+ INDIRECT_ASSIGN,
+ INDIRECT_SETBIT,
+ INDIRECT_CLEARBIT,
+
+ PRPH_BLOCKBIT,
+};
+
+/**
+ * struct iwl_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: &enum iwl_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwl_fw_dbg_reg_op {
+ u8 op;
+ u8 reserved[3];
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
+ */
+enum iwl_fw_dbg_monitor_mode {
+ SMEM_MODE = 0,
+ EXTERNAL_MODE = 1,
+ MARBH_MODE = 2,
+ MIPI_MODE = 3,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: the memory segment type to record
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+ __le32 data_type;
+ __le32 ofs;
+ __le32 len;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: &enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg: addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of registers operations
+ *
+ * This parses IWL_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwl_fw_dbg_dest_tlv_v1 {
+ u8 version;
+ u8 monitor_mode;
+ u8 size_power;
+ u8 reserved;
+ __le32 base_reg;
+ __le32 end_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 end_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[];
+} __packed;
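The @size_power field encodes the monitor buffer size as 2^(size_power + 11); for example, size_power == 7 gives 256 KiB. A hypothetical helper making that explicit:

static inline u32 example_dbg_dest_buf_size(const struct iwl_fw_dbg_dest_tlv_v1 *dest)
{
        /* buffer size is 2^(size_power + 11) bytes */
        return 1U << (dest->size_power + 11);
}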
+
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWL_M2S_UNIT_SIZE 0x100
+
+struct iwl_fw_dbg_dest_tlv {
+ u8 version;
+ u8 monitor_mode;
+ u8 size_power;
+ u8 reserved;
+ __le32 cfg_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 size_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[];
+} __packed;
+
+struct iwl_fw_dbg_conf_hcmd {
+ u8 id;
+ u8 reserved;
+ __le16 len;
+ u8 data[];
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_trigger_mode - triggers functionalities
+ *
+ * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ * collect only monitor data
+ */
+enum iwl_fw_dbg_trigger_mode {
+ IWL_FW_DBG_TRIGGER_START = BIT(0),
+ IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+ IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers
+ * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart
+ */
+enum iwl_fw_dbg_trigger_flags {
+ IWL_FW_DBG_FORCE_RESTART = BIT(0),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWL_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ */
+enum iwl_fw_dbg_trigger_vif_type {
+ IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+ IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+ IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+ IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+ IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+ IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+ IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+};
+
+/**
+ * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: &enum iwl_fw_dbg_trigger
+ * @vif_type: &enum iwl_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ * to the currently running configuration is set, the data should be
+ * collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ * after the STOP trigger fires.
+ * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both
+ * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
+ * configuration should be applied when the triggers kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ * @trig_dis_ms: the time, in milliseconds, after an occurrence of this
+ * trigger in which another occurrence should be ignored.
+ * @flags: &enum iwl_fw_dbg_trigger_flags
+ */
+struct iwl_fw_dbg_trigger_tlv {
+ __le32 id;
+ __le32 vif_type;
+ __le32 stop_conf_ids;
+ __le32 stop_delay;
+ u8 mode;
+ u8 start_conf_id;
+ __le16 occurrences;
+ __le16 trig_dis_ms;
+ u8 flags;
+ u8 reserved[5];
+
+ u8 data[];
+} __packed;
+
+#define FW_DBG_START_FROM_ALIVE 0
+#define FW_DBG_CONF_MAX 32
+#define FW_DBG_INVALID 0xff
+
+/**
+ * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwl_fw_dbg_trigger_missed_bcon {
+ __le32 stop_consec_missed_bcon;
+ __le32 stop_consec_missed_bcon_since_rx;
+ __le32 reserved2[2];
+ __le32 start_consec_missed_bcon;
+ __le32 start_consec_missed_bcon_since_rx;
+ __le32 reserved1[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * cmds: the list of commands to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_cmd {
+ struct cmd {
+ u8 cmd_id;
+ u8 group_id;
+ } __packed cmds[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwl_fw_dbg_trigger_stats {
+ __le32 stop_offset;
+ __le32 stop_threshold;
+ __le32 start_offset;
+ __le32 start_threshold;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwl_fw_dbg_trigger_low_rssi {
+ __le32 rssi;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth to collect
+ * @stop_tx_deauth: number of Tx deauth to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwl_fw_dbg_trigger_mlme {
+ u8 stop_auth_denied;
+ u8 stop_auth_timeout;
+ u8 stop_rx_deauth;
+ u8 stop_tx_deauth;
+
+ u8 stop_assoc_denied;
+ u8 stop_assoc_timeout;
+ u8 stop_connection_loss;
+ u8 reserved;
+
+ u8 start_auth_denied;
+ u8 start_auth_timeout;
+ u8 start_rx_deauth;
+ u8 start_tx_deauth;
+
+ u8 start_assoc_denied;
+ u8 start_assoc_timeout;
+ u8 start_connection_loss;
+ u8 reserved2;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwl_fw_dbg_trigger_txq_timer {
+ __le32 command_queue;
+ __le32 bss;
+ __le32 softap;
+ __le32 p2p_go;
+ __le32 p2p_client;
+ __le32 p2p_device;
+ __le32 ibss;
+ __le32 tdls;
+ __le32 reserved[4];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger
+ * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a
+ * trigger each time a time event notification that relates to time event
+ * id with one of the actions in the bitmap is received and
+ * BIT(notif->status) is set in status_bitmap.
+ *
+ */
+struct iwl_fw_dbg_trigger_time_event {
+ struct {
+ __le32 id;
+ __le32 action_bitmap;
+ __le32 status_bitmap;
+ } __packed time_events[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ * when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+ __le16 rx_ba_start;
+ __le16 rx_ba_stop;
+ __le16 tx_ba_start;
+ __le16 tx_ba_stop;
+ __le16 rx_bar;
+ __le16 tx_bar;
+ __le16 frame_timeout;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events.
+ * @action_bitmap: the TDLS action to trigger the collection upon
+ * @peer_mode: trigger on specific peer or all
+ * @peer: the TDLS peer to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tdls {
+ u8 action_bitmap;
+ u8 peer_mode;
+ u8 peer[ETH_ALEN];
+ u8 reserved[4];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response
+ * status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tx_status {
+ struct tx_status {
+ u8 status;
+ u8 reserved[3];
+ } __packed statuses[16];
+ __le32 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: how many HCMDs to send are present here
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ * If there is more than one HCMD to send, they will appear one after the
+ * other and be sent in the order that they appear in.
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to
+ * %FW_DBG_CONF_MAX configurations per run.
+ */
+struct iwl_fw_dbg_conf_tlv {
+ u8 id;
+ u8 usniffer;
+ u8 reserved;
+ u8 num_of_hcmds;
+ struct iwl_fw_dbg_conf_hcmd hcmd;
+} __packed;
+
+#define IWL_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwl_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwl_fw_cmd_version {
+ u8 cmd;
+ u8 group;
+ u8 cmd_ver;
+ u8 notif_ver;
+} __packed;
+
+struct iwl_fw_tcm_error_addr {
+ __le32 addr;
+}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
+
+struct iwl_fw_dump_exclude {
+ __le32 addr, size;
+};
+
+static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
+ size_t fixed_size, size_t var_size)
+{
+ size_t var_len = le32_to_cpu(tlv->length) - fixed_size;
+
+ if (WARN_ON(var_len % var_size))
+ return 0;
+
+ return var_len / var_size;
+}
+
+#define iwl_tlv_array_len(_tlv_ptr, _struct_ptr, _memb) \
+ _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \
+ sizeof(_struct_ptr->_memb[0]))
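An assumed usage sketch of the helper above, counting the register operations carried by an IWL_UCODE_TLV_FW_DBG_DEST TLV (the wrapper function is made up):

static size_t example_n_dest_reg_ops(const struct iwl_ucode_tlv *tlv)
{
        const struct iwl_fw_dbg_dest_tlv_v1 *dest = (const void *)tlv->data;

        /* (TLV length - fixed header) / sizeof(struct iwl_fw_dbg_reg_op) */
        return iwl_tlv_array_len(tlv, dest, reg_ops);
}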
+
+#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
new file mode 100644
index 000000000..b7deca05a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2019 - 2021 Intel Corporation
+ */
+#include <fw/api/commands.h>
+#include "img.h"
+
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+ /* prior to LONG_GROUP, we never used this CMD version API */
+ u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+ u8 cmd = iwl_cmd_opcode(cmd_id);
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->cmd_ver;
+ }
+ }
+
+ return def;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
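A hedged usage sketch: picking up the advertised version for a command before building its payload. The command ID is chosen only for illustration; WIDE_ID, LONG_GROUP and SCAN_REQ_UMAC are assumed to come from the fw/api headers, and pr_info is used just to show the result.

static void example_log_scan_cmd_ver(const struct iwl_fw *fw)
{
        u8 ver = iwl_fw_lookup_cmd_ver(fw, WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
                                       IWL_FW_CMD_VER_UNKNOWN);

        if (ver == IWL_FW_CMD_VER_UNKNOWN)
                pr_info("scan command version not advertised in the TLVs\n");
        else
                pr_info("scan command version %u\n", ver);
}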
+
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+{
+ const struct iwl_fw_cmd_version *entry;
+ unsigned int i;
+
+ if (!fw->ucode_capa.cmd_versions ||
+ !fw->ucode_capa.n_cmd_versions)
+ return def;
+
+ entry = fw->ucode_capa.cmd_versions;
+ for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
+ return entry->notif_ver;
+ }
+ }
+
+ return def;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
+
+static const struct {
+ const char *name;
+ u32 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+const char *iwl_fw_lookup_assert_desc(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_assert_desc);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
new file mode 100644
index 000000000..f878ac508
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016 Intel Deutschland GmbH
+ */
+#ifndef __iwl_fw_img_h__
+#define __iwl_fw_img_h__
+#include <linux/types.h>
+
+#include "api/dbg-tlv.h"
+
+#include "file.h"
+#include "error-dump.h"
+
+/**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode.
+ *
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
+ */
+enum iwl_ucode_type {
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
+ IWL_UCODE_REGULAR_USNIFFER,
+ IWL_UCODE_TYPE_MAX,
+};
+
+/*
+ * enumeration of ucode section.
+ * This enumeration is used directly for older firmware (before 16.0).
+ * For new firmware, there can be up to 4 sections (see below) but the
+ * first one packaged into the firmware file is the DATA section and
+ * some debugging code accesses that.
+ */
+enum iwl_ucode_sec {
+ IWL_UCODE_SECTION_DATA,
+ IWL_UCODE_SECTION_INST,
+};
+
+struct iwl_ucode_capabilities {
+ u32 max_probe_length;
+ u32 n_scan_channels;
+ u32 standard_phy_calibration_size;
+ u32 flags;
+ u32 error_log_addr;
+ u32 error_log_size;
+ u32 num_stations;
+ unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+ unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+
+ const struct iwl_fw_cmd_version *cmd_versions;
+ u32 n_cmd_versions;
+};
+
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_api_t api)
+{
+ return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_capa_t capa)
+{
+ return test_bit((__force long)capa, capabilities->_capa);
+}
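Typical capability checks then look like the following sketch (the capability bit is only an example):

static inline bool
example_supports_umac_scan(const struct iwl_ucode_capabilities *capa)
{
        return fw_has_capa(capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN);
}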
+
+/* one for each uCode image (inst/data, init/runtime/wowlan) */
+struct fw_desc {
+ const void *data; /* vmalloc'ed data */
+ u32 len; /* size in bytes */
+ u32 offset; /* offset in the device */
+};
+
+struct fw_img {
+ struct fw_desc *sec;
+ int num_sec;
+ bool is_dual_cpus;
+ u32 paging_mem_size;
+};
+
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
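The geometry above works out to 4 KiB pages, 8 pages per 32 KiB block, and at most 32 blocks, i.e. a 1 MiB paging image. A compile-time sanity sketch, assuming <linux/sizes.h> is available:

static inline void example_paging_geometry_checks(void)
{
        BUILD_BUG_ON(FW_PAGING_SIZE != SZ_4K);          /* 4 KiB pages */
        BUILD_BUG_ON(PAGING_BLOCK_SIZE != SZ_32K);      /* 8 pages per block */
        BUILD_BUG_ON(MAX_PAGING_IMAGE_SIZE != SZ_1M);   /* 32 blocks per image */
}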
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
+#define PAGING_TLV_SECURE_MASK 1
+
+/* FW MSB Mask for regions/cache_control */
+#define FW_ADDR_CACHE_CONTROL 0xC0000000UL
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ * @fw_offs: offset in the device
+ */
+struct iwl_fw_paging {
+ dma_addr_t fw_paging_phys;
+ struct page *fw_paging_block;
+ u32 fw_paging_size;
+ u32 fw_offs;
+};
+
+/**
+ * enum iwl_fw_type - iwlwifi firmware type
+ * @IWL_FW_DVM: DVM firmware
+ * @IWL_FW_MVM: MVM firmware
+ */
+enum iwl_fw_type {
+ IWL_FW_DVM,
+ IWL_FW_MVM,
+};
+
+/**
+ * struct iwl_fw_dbg - debug data
+ *
+ * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
+ * @n_dest_reg: num of reg_ops in dest_tlv
+ * @conf_tlv: array of pointers to configuration HCMDs
+ * @trigger_tlv: array of pointers to triggers TLVs
+ * @trigger_tlv_len: lengths of the @trigger_tlv entries
+ * @mem_tlv: Runtime addresses to dump
+ * @n_mem_tlv: number of runtime addresses
+ * @dump_mask: bitmask of dump regions
+*/
+struct iwl_fw_dbg {
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+ u8 n_dest_reg;
+ struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
+ size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
+ size_t n_mem_tlv;
+ u32 dump_mask;
+};
+
+struct iwl_dump_exclude {
+ u32 addr, size;
+};
+
+/**
+ * struct iwl_fw - variables associated with the firmware
+ *
+ * @ucode_ver: ucode version from the ucode file
+ * @fw_version: firmware version string
+ * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
+ * @iml_len: length of the image loader image
+ * @iml: image loader fw image
+ * @ucode_capa: capabilities parsed from the ucode file.
+ * @enhance_sensitivity_table: device can do enhanced sensitivity.
+ * @init_evtlog_ptr: event log offset for init ucode.
+ * @init_evtlog_size: event log size for init ucode.
+ * @init_errlog_ptr: error log offset for init ucode.
+ * @inst_evtlog_ptr: event log offset for runtime ucode.
+ * @inst_evtlog_size: event log size for runtime ucode.
+ * @inst_errlog_ptr: error log offset for runtime ucode.
+ * @type: firmware type (&enum iwl_fw_type)
+ * @human_readable: human readable version string
+ * @phy_integration_ver: PHY integration version string
+ * @phy_integration_ver_len: length of @phy_integration_ver
+ * @dump_excl: image dump exclusion areas for RT image
+ * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image
+ */
+struct iwl_fw {
+ u32 ucode_ver;
+
+ char fw_version[64];
+
+ /* ucode images */
+ struct fw_img img[IWL_UCODE_TYPE_MAX];
+ size_t iml_len;
+ u8 *iml;
+
+ struct iwl_ucode_capabilities ucode_capa;
+ bool enhance_sensitivity_table;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
+ u32 phy_config;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+
+ enum iwl_fw_type type;
+
+ u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+
+ struct iwl_fw_dbg dbg;
+
+ u8 *phy_integration_ver;
+ u32 phy_integration_ver_len;
+
+ struct iwl_dump_exclude dump_excl[2], dump_excl_wowlan[2];
+};
+
+static inline const char *get_fw_dbg_mode_string(int mode)
+{
+ switch (mode) {
+ case SMEM_MODE:
+ return "SMEM";
+ case EXTERNAL_MODE:
+ return "EXTERNAL_DRAM";
+ case MARBH_MODE:
+ return "MARBH";
+ case MIPI_MODE:
+ return "MIPI";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];
+
+ if (!conf_tlv)
+ return false;
+
+ return conf_tlv->usniffer;
+}
+
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &fw->img[ucode_type];
+}
+
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
+
+u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+const char *iwl_fw_lookup_assert_desc(u32 num);
+
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
+#define FW_SYSASSERT_PNVM_MISSING 0x0010070d
+
+#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
new file mode 100644
index 000000000..135bd48bf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+
+#include "fw/api/system.h"
+#include "fw/api/commands.h"
+#include "fw/api/rx.h"
+#include "fw/api/datapath.h"
+
+void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx,
+ struct dentry *dbgfs_dir)
+{
+ int i;
+
+ memset(fwrt, 0, sizeof(*fwrt));
+ fwrt->trans = trans;
+ fwrt->fw = fw;
+ fwrt->dev = trans->dev;
+ fwrt->dump.conf = FW_DBG_INVALID;
+ fwrt->ops = ops;
+ fwrt->sanitize_ops = sanitize_ops;
+ fwrt->sanitize_ctx = sanitize_ctx;
+ fwrt->ops_ctx = ops_ctx;
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ fwrt->dump.wks[i].idx = i;
+ INIT_DELAYED_WORK(&fwrt->dump.wks[i].wk, iwl_fw_error_dump_wk);
+ }
+ iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
+{
+ iwl_fw_suspend_timestamp(fwrt);
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
+{
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, NULL);
+ iwl_fw_resume_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
+
+/* set device type and latency */
+int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_soc_configuration_cmd cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ int ret;
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other bits.
+ */
+ if (!fwrt->trans->trans_cfg->integrated)
+ cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
+
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
+ SOC_FLAGS_LTR_APPLY_DELAY_NONE);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_200);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_2500);
+ BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
+ SOC_FLAGS_LTR_APPLY_DELAY_1820);
+
+ if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+ !WARN_ON(!fwrt->trans->trans_cfg->integrated))
+ cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
+ SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+
+ if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
+ fwrt->trans->trans_cfg->low_latency_xtal)
+ cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
+
+ cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (ret)
+ IWL_ERR(fwrt, "Failed to set soc latency: %d\n", ret);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_set_soc_latency);
+
+int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
+{
+ int i, num_queues, size, ret;
+ struct iwl_rfh_queue_config *cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ /*
+ * The default queue is configured via context info, so if we
+ * have a single queue, there's nothing to do here.
+ */
+ if (fwrt->trans->num_rx_queues == 1)
+ return 0;
+
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000)
+ return 0;
+
+ /* skip the default queue */
+ num_queues = fwrt->trans->num_rx_queues - 1;
+
+ size = struct_size(cmd, data, num_queues);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = size;
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+
+ kfree(cmd);
+
+ if (ret)
+ IWL_ERR(fwrt, "Failed to configure RX queues: %d\n", ret);
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_configure_rxq);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
new file mode 100644
index 000000000..231d2517f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/sched.h>
+#include <linux/export.h>
+
+#include "iwl-drv.h"
+#include "notif-wait.h"
+
+
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
+{
+ spin_lock_init(&notif_wait->notif_wait_lock);
+ INIT_LIST_HEAD(&notif_wait->notif_waits);
+ init_waitqueue_head(&notif_wait->notif_waitq);
+}
+IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
+
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
+{
+ bool triggered = false;
+
+ if (!list_empty(&notif_wait->notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_for_each_entry(w, &notif_wait->notif_waits, list) {
+ int i;
+ bool found = false;
+
+ /*
+ * If it already finished (triggered) or has been
+ * aborted then don't evaluate it again to avoid races.
+ * Otherwise the function could be called again even
+ * though it returned true before.
+ */
+ if (w->triggered || w->aborted)
+ continue;
+
+ for (i = 0; i < w->n_cmds; i++) {
+ u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+ pkt->hdr.cmd);
+
+ if (w->cmds[i] == rec_id ||
+ (!iwl_cmd_groupid(w->cmds[i]) &&
+ DEF_ID(w->cmds[i]) == rec_id)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ continue;
+
+ if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
+ w->triggered = true;
+ triggered = true;
+ }
+ }
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+ }
+
+ return triggered;
+}
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
+
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
+{
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+
+ wake_up_all(&notif_wait->notif_waitq);
+}
+IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
+
+void
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ const u16 *cmds, int n_cmds,
+ bool (*fn)(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data)
+{
+ if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
+ n_cmds = MAX_NOTIF_CMDS;
+
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->n_cmds = n_cmds;
+ memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_add(&wait_entry->list, &notif_wait->notif_waits);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
+IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
+
+void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&notif_wait->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
+IWL_EXPORT_SYMBOL(iwl_remove_notification);
+
+int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(notif_wait->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ iwl_remove_notification(notif_wait, wait_entry);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_wait_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
new file mode 100644
index 000000000..49e8ba11b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#ifndef __iwl_notif_wait_h__
+#define __iwl_notif_wait_h__
+
+#include <linux/wait.h>
+
+#include "iwl-trans.h"
+
+struct iwl_notif_wait_data {
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+};
+
+#define MAX_NOTIF_CMDS 5
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: Function called with the notification. If the function
+ * returns true, the wait is over, if it returns false then
+ * the waiter stays blocked. If no function is given, any
+ * of the listed commands will unblock the waiter.
+ * @cmds: command IDs
+ * @n_cmds: number of command IDs
+ * @triggered: waiter should be woken up
+ * @aborted: wait was aborted
+ *
+ * This structure is not used directly, to wait for a
+ * notification declare it on the stack, and call
+ * iwl_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and to wait for that then
+ * call iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ bool (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data);
+ void *fn_data;
+
+ u16 cmds[MAX_NOTIF_CMDS];
+ u8 n_cmds;
+ bool triggered, aborted;
+};
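A usage sketch of the pattern described above; the notification ID, timeout and wrapper function are illustrative, with STATISTICS_NOTIFICATION assumed to come from fw/api/commands.h:

static int example_wait_for_stats(struct iwl_notif_wait_data *notif_data)
{
        struct iwl_notification_wait wait;
        static const u16 cmds[] = { STATISTICS_NOTIFICATION };

        /* declare the wait on the stack and register it first */
        iwl_init_notification_wait(notif_data, &wait, cmds, ARRAY_SIZE(cmds),
                                   NULL, NULL);

        /* ... send the command that makes the firmware notify us ... */

        /* blocks until the notification arrives, is aborted, or times out */
        return iwl_wait_notification(notif_data, &wait, HZ / 2);
}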
+
+
+/* caller functions */
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+ wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_notification_wait(notif_data, pkt))
+ iwl_notification_notify(notif_data);
+}
+
+/* user functions */
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ const u16 *cmds, int n_cmds,
+ bool (*fn)(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data),
+ void *fn_data);
+
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
+ struct iwl_notification_wait *wait_entry);
+
+#endif /* __iwl_notif_wait_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
new file mode 100644
index 000000000..945bc4160
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "fw/api/commands.h"
+
+void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
+{
+ int i;
+
+ if (!fwrt->fw_paging_db[0].fw_paging_block)
+ return;
+
+ for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+ struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i];
+
+ if (!paging->fw_paging_block) {
+ IWL_DEBUG_FW(fwrt,
+ "Paging: block %d already freed, continue to next page\n",
+ i);
+
+ continue;
+ }
+ dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys,
+ paging->fw_paging_size, DMA_BIDIRECTIONAL);
+
+ __free_pages(paging->fw_paging_block,
+ get_order(paging->fw_paging_size));
+ paging->fw_paging_block = NULL;
+ }
+
+ memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
+}
+IWL_EXPORT_SYMBOL(iwl_free_fw_paging);
+
+static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *image)
+{
+ struct page *block;
+ dma_addr_t phys = 0;
+ int blk_idx, order, num_of_pages, size;
+
+ if (fwrt->fw_paging_db[0].fw_paging_block)
+ return 0;
+
+ /* ensure PAGING_BLOCK_SIZE is a power of two, with BLOCK_2_EXP_SIZE as its exponent */
+ BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+ num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+ fwrt->num_of_paging_blk =
+ DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
+ fwrt->num_of_pages_in_last_blk =
+ num_of_pages -
+ NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);
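+
+ /*
+ * Worked example (illustrative numbers, assuming the 4 KiB firmware
+ * page size and 8 pages per 32 KiB block used below): a
+ * paging_mem_size of 348160 bytes gives num_of_pages = 85,
+ * num_of_paging_blk = DIV_ROUND_UP(85, 8) = 11 and
+ * num_of_pages_in_last_blk = 85 - 8 * 10 = 5.
+ */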
+
+ IWL_DEBUG_FW(fwrt,
+ "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+ fwrt->num_of_paging_blk,
+ fwrt->num_of_pages_in_last_blk);
+
+ /*
+ * Allocate CSS and paging blocks in dram.
+ */
+ for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+ /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+ size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+ order = get_order(size);
+ block = alloc_pages(GFP_KERNEL, order);
+ if (!block) {
+ /* free all the previous pages since we failed */
+ iwl_free_fw_paging(fwrt);
+ return -ENOMEM;
+ }
+
+ fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
+ fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
+
+ phys = dma_map_page(fwrt->trans->dev, block, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(fwrt->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one
+ * since we failed to map_page.
+ */
+ iwl_free_fw_paging(fwrt);
+ return -ENOMEM;
+ }
+ fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
+
+ if (!blk_idx)
+ IWL_DEBUG_FW(fwrt,
+ "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+ order);
+ else
+ IWL_DEBUG_FW(fwrt,
+ "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+ order);
+ }
+
+ return 0;
+}
+
+static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *image)
+{
+ int sec_idx, idx, ret;
+ u32 offset = 0;
+
+ /*
+ * find where the paging image starts:
+ * if CPU2 exists and is in paging format, then the image looks like:
+ * CPU1 sections (2 or more)
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+ * CPU2 sections (not paged)
+ * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
+ * sections from the CPU2 paging section
+ * CPU2 paging CSS
+ * CPU2 paging image (including instruction and data)
+ */
+ for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
+ if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+ sec_idx++;
+ break;
+ }
+ }
+
+ /*
+ * If paging is enabled there should be at least 2 more sections left
+ * (one for CSS and one for Paging data)
+ */
+ if (sec_idx >= image->num_sec - 1) {
+ IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* copy the CSS block to the dram */
+ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+ sec_idx);
+
+ if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
+ IWL_ERR(fwrt, "CSS block is larger than paging size\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+ image->sec[sec_idx].data,
+ image->sec[sec_idx].len);
+ fwrt->fw_paging_db[0].fw_offs = image->sec[sec_idx].offset;
+ dma_sync_single_for_device(fwrt->trans->dev,
+ fwrt->fw_paging_db[0].fw_paging_phys,
+ fwrt->fw_paging_db[0].fw_paging_size,
+ DMA_BIDIRECTIONAL);
+
+ IWL_DEBUG_FW(fwrt,
+ "Paging: copied %d CSS bytes to first block\n",
+ fwrt->fw_paging_db[0].fw_paging_size);
+
+ sec_idx++;
+
+ /*
+ * Copy the paging blocks to the dram. The loop index starts
+ * from 1 since the CSS block (index 0) was already copied to
+ * dram. We use num_of_paging_blk + 1 to account for that.
+ */
+ for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
+ struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ int remaining = image->sec[sec_idx].len - offset;
+ int len = block->fw_paging_size;
+
+ /*
+ * For the last block we copy all that remains; for all other
+ * blocks we copy fw_paging_size bytes at a time.
+ */
+ if (idx == fwrt->num_of_paging_blk) {
+ len = remaining;
+ if (remaining !=
+ fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
+ IWL_ERR(fwrt,
+ "Paging: last block contains more data than expected %d\n",
+ remaining);
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if (block->fw_paging_size > remaining) {
+ IWL_ERR(fwrt,
+ "Paging: not enough data in other in block %d (%d)\n",
+ idx, remaining);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memcpy(page_address(block->fw_paging_block),
+ (const u8 *)image->sec[sec_idx].data + offset, len);
+ block->fw_offs = image->sec[sec_idx].offset + offset;
+ dma_sync_single_for_device(fwrt->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+ DMA_BIDIRECTIONAL);
+
+ IWL_DEBUG_FW(fwrt,
+ "Paging: copied %d paging bytes to block %d\n",
+ len, idx);
+
+ offset += block->fw_paging_size;
+ }
+
+ return 0;
+
+err:
+ iwl_free_fw_paging(fwrt);
+ return ret;
+}
+
+static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *fw)
+{
+ int ret;
+
+ ret = iwl_alloc_fw_paging_mem(fwrt, fw);
+ if (ret)
+ return ret;
+
+ return iwl_fill_paging_mem(fwrt, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *fw)
+{
+ struct iwl_fw_paging_cmd paging_cmd = {
+ .flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
+ PAGING_CMD_IS_ENABLED |
+ (fwrt->num_of_pages_in_last_blk <<
+ PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+ .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+ .block_num = cpu_to_le32(fwrt->num_of_paging_blk),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
+ .len = { sizeof(paging_cmd), },
+ .data = { &paging_cmd, },
+ };
+ int blk_idx;
+
+ /* loop over all paging blocks + the CSS block */
+ for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+ dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
+ __le32 phy_addr;
+
+ addr = addr >> PAGE_2_EXP_SIZE;
+ phy_addr = cpu_to_le32(addr);
+ paging_cmd.device_phy_addr[blk_idx] = phy_addr;
+ }
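+
+ /*
+ * Worked example (assuming PAGE_2_EXP_SIZE is 12, i.e. 4 KiB pages):
+ * a block at DMA address 0x12345000 is reported to the firmware in
+ * device_phy_addr above as 0x12345.
+ */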
+
+ return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
+{
+ const struct fw_img *fw = &fwrt->fw->img[type];
+ int ret;
+
+ if (fwrt->trans->trans_cfg->gen2)
+ return 0;
+
+ /*
+ * Configure and operate fw paging mechanism.
+ * The driver configures the paging flow only once.
+ * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+ */
+ if (!fw->paging_mem_size)
+ return 0;
+
+ ret = iwl_save_fw_paging(fwrt, fw);
+ if (ret) {
+ IWL_ERR(fwrt, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(fwrt, fw);
+ if (ret) {
+ IWL_ERR(fwrt, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(fwrt);
+ return ret;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_init_paging);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
new file mode 100644
index 000000000..b6d3ac6ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2020-2021 Intel Corporation
+ */
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "fw/api/commands.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/alive.h"
+#include "fw/uefi.h"
+
+struct iwl_pnvm_section {
+ __le32 offset;
+ const u8 data[];
+} __packed;
+
+static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_trans *trans = (struct iwl_trans *)data;
+ struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data;
+
+ IWL_DEBUG_FW(trans,
+ "PNVM complete notification received with status 0x%0x\n",
+ le32_to_cpu(pnvm_ntf->status));
+
+ return true;
+}
+
+static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ const struct iwl_ucode_tlv *tlv;
+ u32 sha1 = 0;
+ u16 mac_type = 0, rf_id = 0;
+ u8 *pnvm_data = NULL, *tmp;
+ bool hw_match = false;
+ u32 size = 0;
+ int ret;
+
+ IWL_DEBUG_FW(trans, "Handling PNVM section\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_PNVM_VERSION:
+ if (tlv_len < sizeof(__le32)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_PNVM_VERSION (expected %zd, got %d)\n",
+ sizeof(__le32), tlv_len);
+ break;
+ }
+
+ sha1 = le32_to_cpup((const __le32 *)data);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
+ sha1);
+ break;
+ case IWL_UCODE_TLV_HW_TYPE:
+ if (tlv_len < 2 * sizeof(__le16)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_HW_TYPE (expected %zd, got %d)\n",
+ 2 * sizeof(__le16), tlv_len);
+ break;
+ }
+
+ if (hw_match)
+ break;
+
+ mac_type = le16_to_cpup((const __le16 *)data);
+ rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
+ mac_type, rf_id);
+
+ if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+ rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+ hw_match = true;
+ break;
+ case IWL_UCODE_TLV_SEC_RT: {
+ const struct iwl_pnvm_section *section = (const void *)data;
+ u32 data_len = tlv_len - sizeof(*section);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_SEC_RT len %d\n",
+ tlv_len);
+
+ /* TODO: remove, this is a deprecated separator */
+ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
+ IWL_DEBUG_FW(trans, "Ignoring separator.\n");
+ break;
+ }
+
+ IWL_DEBUG_FW(trans, "Adding data (size %d)\n",
+ data_len);
+
+ tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL);
+ if (!tmp) {
+ IWL_DEBUG_FW(trans,
+ "Couldn't allocate (more) pnvm_data\n");
+
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnvm_data = tmp;
+
+ memcpy(pnvm_data + size, section->data, data_len);
+
+ size += data_len;
+
+ break;
+ }
+ case IWL_UCODE_TLV_PNVM_SKU:
+ IWL_DEBUG_FW(trans,
+ "New PNVM section started, stop parsing.\n");
+ goto done;
+ default:
+ IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+ tlv_type, tlv_len);
+ break;
+ }
+
+ len -= ALIGN(tlv_len, 4);
+ data += ALIGN(tlv_len, 4);
+ }
+
+done:
+ if (!hw_match) {
+ IWL_DEBUG_FW(trans,
+ "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+ CSR_HW_REV_TYPE(trans->hw_rev),
+ CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!size) {
+ IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ IWL_INFO(trans, "loaded PNVM version %08x\n", sha1);
+
+ ret = iwl_trans_set_pnvm(trans, pnvm_data, size);
+out:
+ kfree(pnvm_data);
+ return ret;
+}
+
+static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ const struct iwl_ucode_tlv *tlv;
+
+ IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+ tlv_len);
+ IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+ le32_to_cpu(sku_id->data[0]),
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ int ret;
+
+ ret = iwl_pnvm_handle_section(trans, data, len);
+ if (!ret)
+ return 0;
+ } else {
+ IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+ }
+ } else {
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+{
+ const struct firmware *pnvm;
+ char pnvm_name[MAX_PNVM_NAME];
+ size_t new_len;
+ int ret;
+
+ iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ return ret;
+ }
+
+ new_len = pnvm->size;
+ *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ release_firmware(pnvm);
+
+ if (!*data)
+ return -ENOMEM;
+
+ *len = new_len;
+
+ return 0;
+}
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait)
+{
+ u8 *data;
+ size_t len;
+ struct pnvm_sku_package *package;
+ struct iwl_notification_wait pnvm_wait;
+ static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ PNVM_INIT_COMPLETE_NTFY) };
+ int ret;
+
+ /* if the SKU_ID is empty, there's nothing to do */
+ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ return 0;
+
+ /*
+ * If we already loaded (or tried to load) it before, we just
+ * need to set it again.
+ */
+ if (trans->pnvm_loaded) {
+ ret = iwl_trans_set_pnvm(trans, NULL, 0);
+ if (ret)
+ return ret;
+ goto skip_parse;
+ }
+
+ /* First attempt to get the PNVM from BIOS */
+ package = iwl_uefi_get_pnvm(trans, &len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (len >= sizeof(*package)) {
+ /* we need only the data */
+ len -= sizeof(*package);
+ data = kmemdup(package->data, len, GFP_KERNEL);
+ } else {
+ data = NULL;
+ }
+
+ /* free package regardless of whether kmemdup succeeded */
+ kfree(package);
+
+ if (data)
+ goto parse;
+ }
+
+ /* If it's not available, try from the filesystem */
+ ret = iwl_pnvm_get_from_fs(trans, &data, &len);
+ if (ret) {
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->pnvm_loaded = true;
+
+ goto skip_parse;
+ }
+
+parse:
+ iwl_pnvm_parse(trans, data, len);
+
+ kfree(data);
+
+skip_parse:
+ data = NULL;
+ /* now try to get the reduce power table, if not loaded yet */
+ if (!trans->reduce_power_loaded) {
+ data = iwl_uefi_get_reduced_power(trans, &len);
+ if (IS_ERR_OR_NULL(data)) {
+ /*
+ * Pretend we've loaded it - at least we've tried and
+ * couldn't load it at all, so there's no point in
+ * trying again over and over.
+ */
+ trans->reduce_power_loaded = true;
+
+ goto skip_reduce_power;
+ }
+ }
+
+ ret = iwl_trans_set_reduce_power(trans, data, len);
+ if (ret)
+ IWL_DEBUG_FW(trans,
+ "Failed to set reduce power table %d\n",
+ ret);
+ kfree(data);
+
+skip_reduce_power:
+ iwl_init_notification_wait(notif_wait, &pnvm_wait,
+ ntf_cmds, ARRAY_SIZE(ntf_cmds),
+ iwl_pnvm_complete_fn, trans);
+
+ /* kick the doorbell */
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_PNVM);
+
+ return iwl_wait_notification(notif_wait, &pnvm_wait,
+ MVM_UCODE_PNVM_TIMEOUT);
+}
+IWL_EXPORT_SYMBOL(iwl_pnvm_load);
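+
+/*
+ * Illustrative caller sketch, not part of this change: the op-mode
+ * typically calls this right after the ALIVE notification, e.g. (the
+ * surrounding mvm names are placeholders here):
+ *
+ *     ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
+ *     if (ret)
+ *             IWL_ERR(mvm, "Timeout waiting for PNVM load %d\n", ret);
+ */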
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
new file mode 100644
index 000000000..203c367dd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/******************************************************************************
+ *
+ * Copyright(c) 2020-2021 Intel Corporation
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PNVM_H__
+#define __IWL_PNVM_H__
+
+#include "fw/notif-wait.h"
+
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
+
+#define MAX_PNVM_NAME 64
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait);
+
+static inline
+void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
+ u8 *pnvm_name, size_t max_len)
+{
+ int pre_len;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ pre_len = strlen(trans->cfg->fw_name_pre);
+ if (pre_len < max_len && pre_len > 0)
+ pnvm_name[pre_len - 1] = '.';
+}
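+
+/*
+ * For example (the prefix is illustrative only): a fw_name_pre of
+ * "iwlwifi-ty-a0-gf-a0-" yields the PNVM file name
+ * "iwlwifi-ty-a0-gf-a0.pnvm".
+ */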
+
+#endif /* __IWL_PNVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
new file mode 100644
index 000000000..a83521461
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include "fw/api/rs.h"
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+#define IWL_DECLARE_RATE_INFO(r) \
+ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1),
+ IWL_DECLARE_RATE_INFO(2),
+ IWL_DECLARE_RATE_INFO(5),
+ IWL_DECLARE_RATE_INFO(11),
+ IWL_DECLARE_RATE_INFO(6),
+ IWL_DECLARE_RATE_INFO(9),
+ IWL_DECLARE_RATE_INFO(12),
+ IWL_DECLARE_RATE_INFO(18),
+ IWL_DECLARE_RATE_INFO(24),
+ IWL_DECLARE_RATE_INFO(36),
+ IWL_DECLARE_RATE_INFO(48),
+ IWL_DECLARE_RATE_INFO(54),
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+};
+
+static const char * const pretty_bw[] = {
+ "20Mhz",
+ "40Mhz",
+ "80Mhz",
+ "160 Mhz",
+ "320Mhz",
+};
+
+u8 iwl_fw_rate_idx_to_plcp(int idx)
+{
+ return fw_rate_idx_to_plcp[idx];
+}
+IWL_EXPORT_SYMBOL(iwl_fw_rate_idx_to_plcp);
+
+const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx)
+{
+ return &rate_mcs[idx];
+}
+IWL_EXPORT_SYMBOL(iwl_rate_mcs);
+
+const char *iwl_rs_pretty_ant(u8 ant)
+{
+ if (ant >= ARRAY_SIZE(ant_name))
+ return "UNKNOWN";
+
+ return ant_name[ant];
+}
+IWL_EXPORT_SYMBOL(iwl_rs_pretty_ant);
+
+const char *iwl_rs_pretty_bw(int bw)
+{
+ if (bw >= ARRAY_SIZE(pretty_bw))
+ return "unknown bw";
+
+ return pretty_bw[bw];
+}
+IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
+u32 iwl_new_rate_from_v1(u32 rate_v1)
+{
+ u32 rate_v2 = 0;
+ u32 dup = 0;
+
+ if (rate_v1 == 0)
+ return rate_v1;
+ /* convert rate */
+ if (rate_v1 & RATE_MCS_HT_MSK_V1) {
+ u32 nss = 0;
+
+ rate_v2 |= RATE_MCS_HT_MSK;
+ rate_v2 |=
+ rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
+ nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >>
+ RATE_HT_MCS_NSS_POS_V1;
+ rate_v2 |= nss << RATE_MCS_NSS_POS;
+ } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
+ rate_v1 & RATE_MCS_HE_MSK_V1) {
+ rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
+
+ rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK;
+
+ if (rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
+ u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
+ RATE_MCS_HE_106T_POS_V1;
+ u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
+ RATE_MCS_HE_GI_LTF_POS;
+
+ if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
+ he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
+ he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
+ /* the new rate format has an additional bit to
+ * represent the value 4 rather than using the SGI
+ * bit for this purpose, as was done in the old
+ * format */
+ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
+ RATE_MCS_SGI_POS_V1;
+
+ rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
+ rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS;
+ rate_v2 |= he_106t << RATE_MCS_HE_106T_POS;
+ rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
+ rate_v2 |= RATE_MCS_HE_MSK;
+ } else {
+ rate_v2 |= RATE_MCS_VHT_MSK;
+ }
+ /* if legacy format */
+ } else {
+ u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
+
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
+ rate_v2 |= legacy_rate;
+ if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
+ rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
+ }
+
+ /* convert flags */
+ if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
+ rate_v2 |= RATE_MCS_LDPC_MSK;
+ rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate_v1 & RATE_MCS_ANT_AB_MSK) |
+ (rate_v1 & RATE_MCS_STBC_MSK) |
+ (rate_v1 & RATE_MCS_BF_MSK);
+
+ dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
+ if (dup) {
+ rate_v2 |= RATE_MCS_DUP_MSK;
+ rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS;
+ }
+
+ if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
+ (rate_v1 & RATE_MCS_SGI_MSK_V1))
+ rate_v2 |= RATE_MCS_SGI_MSK;
+
+ return rate_v2;
+}
+IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
+
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
+{
+ char *type;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >>
+ RATE_MCS_CHAN_WIDTH_POS;
+ u32 format = rate & RATE_MCS_MOD_TYPE_MSK;
+ bool sgi;
+
+ if (format == RATE_MCS_CCK_MSK ||
+ format == RATE_MCS_LEGACY_OFDM_MSK) {
+ int legacy_rate = rate & RATE_LEGACY_RATE_MSK;
+ int index = format == RATE_MCS_CCK_MSK ?
+ legacy_rate :
+ legacy_rate + IWL_FIRST_OFDM_RATE;
+
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
+ iwl_rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs(index)->mbps);
+ }
+
+ if (format == RATE_MCS_VHT_MSK)
+ type = "VHT";
+ else if (format == RATE_MCS_HT_MSK)
+ type = "HT";
+ else if (format == RATE_MCS_HE_MSK)
+ type = "HE";
+ else
+ type = "Unknown"; /* shouldn't happen */
+
+ mcs = format == RATE_MCS_HT_MSK ?
+ RATE_HT_MCS_INDEX(rate) :
+ rate & RATE_MCS_CODE_MSK;
+ nss = ((rate & RATE_MCS_NSS_MSK)
+ >> RATE_MCS_NSS_POS) + 1;
+ sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate) :
+ rate & RATE_MCS_SGI_MSK;
+
+ return scnprintf(buf, bufsz,
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
+ rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss,
+ (sgi) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+}
+IWL_EXPORT_SYMBOL(rs_pretty_print_rate);
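+
+/*
+ * Illustrative usage sketch, not part of this change (the debug macro
+ * context and the source of rate_n_flags are placeholders):
+ *
+ *     char buf[120];
+ *
+ *     rs_pretty_print_rate(buf, sizeof(buf), rate_n_flags);
+ *     IWL_DEBUG_RATE(mvm, "TX %s\n", buf);
+ */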
+
+bool iwl_he_is_sgi(u32 rate_n_flags)
+{
+ u32 type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u32 ltf_gi = rate_n_flags & RATE_MCS_HE_GI_LTF_MSK;
+
+ if (type == RATE_MCS_HE_TYPE_SU ||
+ type == RATE_MCS_HE_TYPE_EXT_SU)
+ return ltf_gi == RATE_MCS_HE_SU_4_LTF_08_GI;
+ return false;
+}
+IWL_EXPORT_SYMBOL(iwl_he_is_sgi);
+
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
new file mode 100644
index 000000000..d3cb1ae68
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2022 Intel Corporation
+ */
+#ifndef __iwl_fw_runtime_h__
+#define __iwl_fw_runtime_h__
+
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "img.h"
+#include "fw/api/debug.h"
+#include "fw/api/paging.h"
+#include "fw/api/power.h"
+#include "iwl-eeprom-parse.h"
+#include "fw/acpi.h"
+
+struct iwl_fw_runtime_ops {
+ void (*dump_start)(void *ctx);
+ void (*dump_end)(void *ctx);
+ bool (*fw_running)(void *ctx);
+ int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ bool (*d3_debug_enable)(void *ctx);
+};
+
+#define MAX_NUM_LMAC 2
+struct iwl_fwrt_shared_mem_cfg {
+ int num_lmacs;
+ int num_txfifo_entries;
+ struct {
+ u32 txfifo_size[TX_FIFO_MAX_NUM];
+ u32 rxfifo1_size;
+ } lmac[MAX_NUM_LMAC];
+ u32 rxfifo2_size;
+ u32 rxfifo2_control_size;
+ u32 internal_txfifo_addr;
+ u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+};
+
+#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
+
+/**
+ * struct iwl_fwrt_dump_data - dump data
+ * @trig: trigger the worker was scheduled upon
+ * @fw_pkt: packet received from FW
+ */
+struct iwl_fwrt_dump_data {
+ union {
+ struct {
+ struct iwl_fw_ini_trigger_tlv *trig;
+ struct iwl_rx_packet *fw_pkt;
+ };
+ struct {
+ const struct iwl_fw_dump_desc *desc;
+ bool monitor_only;
+ };
+ };
+};
+
+/**
+ * struct iwl_fwrt_wk_data - dump worker data struct
+ * @idx: index of the worker
+ * @wk: worker
+ */
+struct iwl_fwrt_wk_data {
+ u8 idx;
+ struct delayed_work wk;
+ struct iwl_fwrt_dump_data dump_data;
+};
+
+/**
+ * struct iwl_txf_iter_data - Tx fifo iterator data struct
+ * @fifo: fifo number
+ * @lmac: lmac number
+ * @fifo_size: fifo size
+ * @internal_txf: non zero if fifo is internal Tx fifo
+ */
+struct iwl_txf_iter_data {
+ int fifo;
+ int lmac;
+ u32 fifo_size;
+ u8 internal_txf;
+};
+
+/**
+ * struct iwl_fw_runtime - runtime data for firmware
+ * @fw: firmware image
+ * @trans: transport pointer
+ * @dev: device pointer
+ * @ops: user ops
+ * @ops_ctx: user ops context
+ * @fw_paging_db: paging database
+ * @num_of_paging_blk: number of paging blocks
+ * @num_of_pages_in_last_blk: number of pages in the last block
+ * @smem_cfg: saved firmware SMEM configuration
+ * @cur_fw_img: current firmware image, must be maintained by
+ * the driver by calling &iwl_fw_set_current_image()
+ * @dump: debug dump data
+ */
+struct iwl_fw_runtime {
+ struct iwl_trans *trans;
+ const struct iwl_fw *fw;
+ struct device *dev;
+
+ const struct iwl_fw_runtime_ops *ops;
+ void *ops_ctx;
+
+ const struct iwl_dump_sanitize_ops *sanitize_ops;
+ void *sanitize_ctx;
+
+ /* Paging */
+ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+ u16 num_of_paging_blk;
+ u16 num_of_pages_in_last_blk;
+
+ enum iwl_ucode_type cur_fw_img;
+
+ /* memory configuration */
+ struct iwl_fwrt_shared_mem_cfg smem_cfg;
+
+ /* debug */
+ struct {
+ struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
+ unsigned long active_wks;
+
+ u8 conf;
+
+ /* ts of the beginning of a non-collect fw dbg data period */
+ unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
+ u32 *d3_debug_data;
+ u32 lmac_err_id[MAX_NUM_LMAC];
+ u32 umac_err_id;
+
+ struct iwl_txf_iter_data txf_iter_data;
+
+ struct {
+ u8 type;
+ u8 subtype;
+ u32 lmac_major;
+ u32 lmac_minor;
+ u32 umac_major;
+ u32 umac_minor;
+ } fw_ver;
+ } dump;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ struct delayed_work wk;
+ u32 delay;
+ u64 seq;
+ } timestamp;
+ bool tpc_enabled;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+#ifdef CONFIG_ACPI
+ struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ u8 sar_chain_a_profile;
+ u8 sar_chain_b_profile;
+ struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u32 geo_rev;
+ u32 geo_num_profiles;
+ bool geo_enabled;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+ u32 ppag_flags;
+ u32 ppag_ver;
+ struct iwl_sar_offset_mapping_cmd sgom_table;
+ bool sgom_enabled;
+ u8 reduced_power_flags;
+#endif
+};
+
+void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx,
+ struct dentry *dbgfs_dir);
+
+static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
+{
+ int i;
+
+ kfree(fwrt->dump.d3_debug_data);
+ fwrt->dump.d3_debug_data = NULL;
+
+ iwl_dbg_tlv_del_timers(fwrt->trans);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
+ cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);
+}
+
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
+ enum iwl_ucode_type cur_fw_img)
+{
+ fwrt->cur_fw_img = cur_fw_img;
+}
+
+int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type);
+void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
+
+void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
+int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt);
+int iwl_configure_rxq(struct iwl_fw_runtime *fwrt);
+
+#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
new file mode 100644
index 000000000..3f1272014
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "fw/api/commands.h"
+
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+ int i, lmac;
+ int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
+ u8 api_ver = iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP,
+ SHARED_MEM_CFG_CMD, 0);
+
+ if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+ return;
+
+ fwrt->smem_cfg.num_lmacs = lmac_num;
+ fwrt->smem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+ fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
+
+ if (api_ver >= 4 &&
+ !WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) < sizeof(*mem_cfg))) {
+ fwrt->smem_cfg.rxfifo2_control_size =
+ le32_to_cpu(mem_cfg->rxfifo2_control_size);
+ }
+
+ for (lmac = 0; lmac < lmac_num; lmac++) {
+ struct iwl_shared_mem_lmac_cfg *lmac_cfg =
+ &mem_cfg->lmac_smem[lmac];
+
+ for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
+ fwrt->smem_cfg.lmac[lmac].txfifo_size[i] =
+ le32_to_cpu(lmac_cfg->txfifo_size[i]);
+ fwrt->smem_cfg.lmac[lmac].rxfifo1_size =
+ le32_to_cpu(lmac_cfg->rxfifo1_size);
+ }
+}
+
+static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
+ int i;
+
+ fwrt->smem_cfg.num_lmacs = 1;
+
+ fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+ fwrt->smem_cfg.lmac[0].txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+
+ fwrt->smem_cfg.lmac[0].rxfifo1_size =
+ le32_to_cpu(mem_cfg->rxfifo_size[0]);
+ fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
+
+ /* new API has more data, from rxfifo_addr field and on */
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
+
+ fwrt->smem_cfg.internal_txfifo_addr =
+ le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+ for (i = 0;
+ i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
+ i++)
+ fwrt->smem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+ }
+}
+
+void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_host_cmd cmd = {
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+ int ret;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
+ ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
+
+ if (ret) {
+ WARN(ret != -ERFKILL,
+ "Could not send the SMEM command: %d\n", ret);
+ return;
+ }
+
+ pkt = cmd.resp_pkt;
+ if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ iwl_parse_shared_mem_22000(fwrt, pkt);
+ else
+ iwl_parse_shared_mem(fwrt, pkt);
+
+ IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n");
+
+ iwl_free_resp(&cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
new file mode 100644
index 000000000..6d408cd0f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+
+#include "fw/uefi.h"
+#include "fw/api/alive.h"
+#include <linux/efi.h>
+#include "fw/runtime.h"
+
+#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
+ 0xb2, 0xec, 0xf5, 0xa3, \
+ 0x59, 0x4f, 0x4a, 0xea)
+
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+ void *data;
+ unsigned long package_size;
+ efi_status_t status;
+
+ *len = 0;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_PNVM_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ status = efi.get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
+ IWL_DEBUG_FW(trans,
+ "PNVM UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
+ kfree(data);
+ return ERR_PTR(-ENOENT);
+ }
+
+ IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size);
+ *len = package_size;
+
+ return data;
+}
+
+static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
+ const u8 *data, size_t len)
+{
+ const struct iwl_ucode_tlv *tlv;
+ u8 *reduce_power_data = NULL, *tmp;
+ u32 size = 0;
+
+ IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ kfree(reduce_power_data);
+ reduce_power_data = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_MEM_DESC: {
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_MEM_DESC len %d\n",
+ tlv_len);
+
+ IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);
+
+ tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
+ if (!tmp) {
+ IWL_DEBUG_FW(trans,
+ "Couldn't allocate (more) reduce_power_data\n");
+
+ kfree(reduce_power_data);
+ reduce_power_data = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ reduce_power_data = tmp;
+
+ memcpy(reduce_power_data + size, data, tlv_len);
+
+ size += tlv_len;
+
+ break;
+ }
+ case IWL_UCODE_TLV_PNVM_SKU:
+ IWL_DEBUG_FW(trans,
+ "New REDUCE_POWER section started, stop parsing.\n");
+ goto done;
+ default:
+ IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+ tlv_type, tlv_len);
+ break;
+ }
+
+ len -= ALIGN(tlv_len, 4);
+ data += ALIGN(tlv_len, 4);
+ }
+
+done:
+ if (!size) {
+ IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+ /* Better safe than sorry, but 'reduce_power_data' should
+ * always be NULL if !size.
+ */
+ kfree(reduce_power_data);
+ reduce_power_data = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ IWL_INFO(trans, "loaded REDUCE_POWER\n");
+
+out:
+ return reduce_power_data;
+}
+
+static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
+ const u8 *data, size_t len)
+{
+ const struct iwl_ucode_tlv *tlv;
+ void *sec_data;
+
+ IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+ tlv_len);
+ IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+ le32_to_cpu(sku_id->data[0]),
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ sec_data = iwl_uefi_reduce_power_section(trans,
+ data,
+ len);
+ if (!IS_ERR(sec_data))
+ return sec_data;
+ } else {
+ IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+ }
+ } else {
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+ struct pnvm_sku_package *package;
+ void *data = NULL;
+ unsigned long package_size;
+ efi_status_t status;
+
+ *len = 0;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
+
+ package = kmalloc(package_size, GFP_KERNEL);
+ if (!package)
+ return ERR_PTR(-ENOMEM);
+
+ status = efi.get_variable(IWL_UEFI_REDUCED_POWER_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, package);
+ if (status != EFI_SUCCESS) {
+ IWL_DEBUG_FW(trans,
+ "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
+ status, package_size);
+ kfree(package);
+ return ERR_PTR(-ENOENT);
+ }
+
+ IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
+ package_size);
+ *len = package_size;
+
+ IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
+ package->rev, package->total_size, package->n_skus);
+
+ data = iwl_uefi_reduce_power_parse(trans, package->data,
+ *len - sizeof(*package));
+
+ kfree(package);
+
+ return data;
+}
+
+#ifdef CONFIG_ACPI
+static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
+ struct iwl_fw_runtime *fwrt)
+{
+ int i, j;
+
+ if (sgom_data->revision != 1)
+ return -EINVAL;
+
+ memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map,
+ sizeof(fwrt->sgom_table.offset_map));
+
+ for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) {
+ for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) {
+ /*
+ * since each byte is composed of two values,
+ * one for each letter,
+ * extract and check each of them separately
+ */
+ u8 value = fwrt->sgom_table.offset_map[i][j];
+ u8 low = value & 0xF;
+ u8 high = (value & 0xF0) >> 4;
+
+ if (high > fwrt->geo_num_profiles)
+ high = 0;
+ if (low > fwrt->geo_num_profiles)
+ low = 0;
+ fwrt->sgom_table.offset_map[i][j] = (high << 4) | low;
+ }
+ }
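+
+ /*
+ * Worked example (illustrative values): a map byte of 0x32 holds
+ * low = 2 and high = 3; with geo_num_profiles == 2 the high nibble
+ * is out of range and gets cleared, so the byte is rewritten as 0x02.
+ */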
+
+ fwrt->sgom_enabled = true;
+ return 0;
+}
+
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_wlan_sgom_data *data;
+ unsigned long package_size;
+ efi_status_t status;
+ int ret;
+
+ if (!fwrt->geo_enabled ||
+ !efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return;
+
+ /* TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_SGOM_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ status = efi.get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
+ IWL_DEBUG_FW(trans,
+ "SGOM UEFI variable not found 0x%lx\n", status);
+ goto out_free;
+ }
+
+ IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
+ package_size);
+
+ ret = iwl_uefi_sgom_parse(data, fwrt);
+ if (ret < 0)
+ IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n");
+
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
+#endif /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
new file mode 100644
index 000000000..09d2a971b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+#ifndef __iwl_fw_uefi__
+#define __iwl_fw_uefi__
+
+#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
+#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
+#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
+
+/*
+ * TODO: we have these hardcoded values that the caller must pass,
+ * because reading from the UEFI is not working. To implement this
+ * properly, we have to change iwl_pnvm_get_from_uefi() to call
+ * efivar_entry_size() and return the value to the caller instead.
+ */
+#define IWL_HARDCODED_PNVM_SIZE 4096
+#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
+#define IWL_HARDCODED_SGOM_SIZE 339
+
+struct pnvm_sku_package {
+ u8 rev;
+ u32 total_size;
+ u8 n_skus;
+ u32 reserved[2];
+ u8 data[];
+} __packed;
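+
+/*
+ * Note: the PNVM package read from UEFI begins with this header;
+ * iwl_pnvm_load() strips sizeof(struct pnvm_sku_package) and parses
+ * the TLV stream that follows in data[].
+ */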
+
+struct uefi_cnv_wlan_sgom_data {
+ u8 revision;
+ u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
+} __packed;
+
+/*
+ * This is known to be broken on v4.19 and to work on v5.4. Until we
+ * figure out why this is the case and how to make it work, simply
+ * disable the feature in old kernels.
+ */
+#ifdef CONFIG_EFI
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+#else /* CONFIG_EFI */
+static inline
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_EFI */
+
+#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+#else
+static inline
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
+{
+}
+#endif
+#endif /* __iwl_fw_uefi__ */