author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/wireless/ath/wil6210
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/ath/wil6210')
-rw-r--r--  drivers/net/wireless/ath/wil6210/Kconfig | 56
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile | 25
-rw-r--r--  drivers/net/wireless/ath/wil6210/boot_loader.h | 57
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 3312
-rw-r--r--  drivers/net/wireless/ath/wil6210/debug.c | 78
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 2511
-rw-r--r--  drivers/net/wireless/ath/wil6210/ethtool.c | 113
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw.c | 28
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw.h | 207
-rw-r--r--  drivers/net/wireless/ath/wil6210/fw_inc.c | 829
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 909
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 2014
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 558
-rw-r--r--  drivers/net/wireless/ath/wil6210/p2p.c | 379
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 688
-rw-r--r--  drivers/net/wireless/ath/wil6210/pm.c | 463
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 444
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.h | 15
-rw-r--r--  drivers/net/wireless/ath/wil6210/rx_reorder.c | 402
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.c | 9
-rw-r--r--  drivers/net/wireless/ath/wil6210/trace.h | 286
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 2589
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.h | 694
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx_edma.c | 1647
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx_edma.h | 604
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 1449
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_crash_dump.c | 122
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.c | 38
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.h | 81
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 4054
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 4221
31 files changed, 28882 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000..f074e9c31
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: ISC
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ select WANT_DEV_COREDUMP
+ select CRC32
+ depends on CFG80211
+ depends on PCI
+ default n
+ help
+ This module adds support for wireless adapters based on the
+ wil6210 chip by Wilocity. It supports operation on the
+ 60 GHz band, covered by the IEEE 802.11ad standard.
+
+ https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
+
+ If you choose to build it as a module, it will be called
+ wil6210.
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ help
+ ISR registers on the wil6210 chip may operate in either
+ COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+ For production use, choose COR (say y); it is the default
+ and saves an extra target transaction.
+ For ISR debugging, choose W1C (say n); it allows monitoring
+ the ISR registers through debugfs. With COR, the ISR would
+ self-clear when read for debug purposes, making such
+ monitoring impossible.
+ Say y unless you are debugging interrupts.
+
+config WIL6210_TRACING
+ bool "wil6210 tracing support"
+ depends on WIL6210
+ depends on EVENT_TRACING
+ default n
+ help
+ Say Y here to enable tracepoints for the wil6210 driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config WIL6210_DEBUGFS
+ bool "wil6210 debugfs support"
+ depends on WIL6210
+ depends on DEBUG_FS
+ default y
+ help
+ Say Y here to enable wil6210 debugfs support, using the
+ kernel debugfs infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000..53a0d995d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: ISC
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-y := main.o
+wil6210-y += netdev.o
+wil6210-y += cfg80211.o
+wil6210-y += pcie_bus.o
+wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
+wil6210-y += wmi.o
+wil6210-y += interrupt.o
+wil6210-y += txrx.o
+wil6210-y += txrx_edma.o
+wil6210-y += debug.o
+wil6210-y += rx_reorder.o
+wil6210-y += fw.o
+wil6210-y += pm.o
+wil6210-y += pmc.o
+wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
+wil6210-y += wil_platform.o
+wil6210-y += ethtool.o
+wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644
index 000000000..a8a43c25f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this dword until the BL
+ * sets it to 1 (initial value
+ * should be 0)
+ */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le16 rf_type; /* 0x880A44 connected RF ID */
+ __le16 rf_status; /* 0x880A46 RF status,
+ * 0 is OK else error
+ */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+ u8 bl_version_major; /* 0x880A52 BL ver. major */
+ u8 bl_version_minor; /* 0x880A53 BL ver. minor */
+ __le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */
+ __le16 bl_version_build; /* 0x880A56 BL ver. build */
+ /* valid only for version 2 and above */
+ __le32 bl_assert_code; /* 0x880A58 BL Assert code */
+ __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
+ __le32 bl_shutdown_handshake; /* 0x880A60 BL cleaner shutdown */
+ __le32 bl_reserved[21]; /* 0x880A64 - 0x880AB4 */
+ __le32 bl_magic_number; /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+ __le32 boot_loader_ready; /* 0x880A3C the driver polls
+ * this dword until the BL
+ * sets it to 1 (initial value
+ * should be 0)
+ */
+#define BL_READY (1) /* ready indication */
+ __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
+ __le32 rf_type; /* 0x880A44 connected RF ID */
+ __le32 baseband_type; /* 0x880A48 board type ID */
+ u8 mac_address[6]; /* 0x880A4c BL mac address */
+} __packed;
+
+/* bits for bl_shutdown_handshake */
+#define BL_SHUTDOWN_HS_GRTD BIT(0)
+#define BL_SHUTDOWN_HS_RTD BIT(1)
+#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31)
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
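The polling protocol described in the boot_loader_ready comment above is the main consumer of this layout. Below is a minimal sketch (not part of this patch) of how a host driver could wait for the boot loader and decode the v1 structure; read_bl() is a hypothetical bus-specific helper that copies the register block at 0x880A3C into host memory, and the retry count and delay are arbitrary.

#include <linux/types.h>	/* __le16 / __le32 */
#include <linux/bits.h>		/* BIT(), used by boot_loader.h */
#include <linux/delay.h>	/* msleep() */
#include <linux/errno.h>
#include <linux/printk.h>	/* pr_info() */
#include "boot_loader.h"

/* Sketch only: all multi-byte fields are little-endian on the device and
 * must be converted with le16_to_cpu()/le32_to_cpu() before use.
 */
static int example_wait_for_bl(struct bl_dedicated_registers_v1 *bl,
			       int (*read_bl)(struct bl_dedicated_registers_v1 *bl))
{
	int retries = 100;

	do {
		int rc = read_bl(bl);

		if (rc)
			return rc;
		if (le32_to_cpu(bl->boot_loader_ready) == BL_READY)
			break;
		msleep(10);	/* boot loader not done yet, poll again */
	} while (--retries);

	if (!retries)
		return -ETIMEDOUT;

	if (le16_to_cpu(bl->rf_status) != 0)	/* 0 is OK, else error */
		return -ENODEV;

	pr_info("BL %d.%d.%d.%d ready, MAC %pM\n",
		bl->bl_version_major, bl->bl_version_minor,
		le16_to_cpu(bl->bl_version_subminor),
		le16_to_cpu(bl->bl_version_build), bl->mac_address);
	return 0;
}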
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000..40f9a7ef8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,3312 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <net/netlink.h>
+#include <net/cfg80211.h>
+#include "wil6210.h"
+#include "wmi.h"
+#include "fw.h"
+
+#define WIL_MAX_ROC_DURATION_MS 5000
+
+#define WIL_EDMG_CHANNEL_9_SUBCHANNELS (BIT(0) | BIT(1))
+#define WIL_EDMG_CHANNEL_10_SUBCHANNELS (BIT(1) | BIT(2))
+#define WIL_EDMG_CHANNEL_11_SUBCHANNELS (BIT(2) | BIT(3))
+
+/* WIL_EDMG_BW_CONFIGURATION defines the allowed channel bandwidth
+ * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13.
+ * The value 5 allows CB1 and CB2 of adjacent channels.
+ */
+#define WIL_EDMG_BW_CONFIGURATION 5
+
+/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that
+ * are allowed to be used for EDMG transmissions in the BSS as defined by
+ * IEEE 802.11 section 9.4.2.251.
+ */
+#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+bool disable_ap_sme;
+module_param(disable_ap_sme, bool, 0444);
+MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+
+#ifdef CONFIG_PM
+static struct wiphy_wowlan_support wil_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
+#define CHAN60G(_channel, _flags) { \
+ .band = NL80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+ CHAN60G(4, 0),
+};
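+/* Example (informational): CHAN60G(2, 0) yields a channel with
+ * center_freq = 56160 + 2160 * 2 = 60480 MHz, i.e. 802.11ad channel 2.
+ * Channel 4 is listed here but is only advertised to cfg80211 when the
+ * firmware reports WMI_FW_CAPABILITY_CHANNEL_4, see
+ * wil_num_supported_channels() below.
+ */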
+
+/* Rx channel bonding mode */
+enum wil_rx_cb_mode {
+ WIL_RX_CB_MODE_DMG,
+ WIL_RX_CB_MODE_EDMG,
+ WIL_RX_CB_MODE_WIDE,
+};
+
+static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+ switch (cb_mode) {
+ case WIL_RX_CB_MODE_DMG:
+ case WIL_RX_CB_MODE_EDMG:
+ return 1;
+ case WIL_RX_CB_MODE_WIDE:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+ switch (cb_mode) {
+ case WMI_TX_MODE_DMG:
+ case WMI_TX_MODE_EDMG_CB1:
+ return 1;
+ case WMI_TX_MODE_EDMG_CB2:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static void
+wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
+{
+ kfree(*pdst);
+ *pdst = NULL;
+ *pdst_len = 0;
+ if (src_len > 0) {
+ *pdst = kmemdup(src, src_len, GFP_KERNEL);
+ if (*pdst)
+ *pdst_len = src_len;
+ }
+}
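+/* Note: wil_memdup_ie() always frees the previously stored IE; if
+ * kmemdup() fails, *pdst stays NULL and *pdst_len stays 0, so callers
+ * fall back to "no IE" rather than keeping a stale pointer.
+ */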
+
+static int wil_num_supported_channels(struct wil6210_priv *wil)
+{
+ int num_channels = ARRAY_SIZE(wil_60ghz_channels);
+
+ if (!test_bit(WMI_FW_CAPABILITY_CHANNEL_4, wil->fw_capabilities))
+ num_channels--;
+
+ return num_channels;
+}
+
+void update_supported_bands(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ wil_dbg_misc(wil, "update supported bands");
+
+ wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
+ wil_num_supported_channels(wil);
+
+ if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
+ wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels =
+ WIL_EDMG_CHANNELS;
+ wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config =
+ WIL_EDMG_BW_CONFIGURATION;
+ }
+}
+
+/* Vendor id to be used in vendor specific command and events
+ * to user space.
+ * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
+ * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
+ */
+
+#define QCA_NL80211_VENDOR_ID 0x001374
+
+#define WIL_MAX_RF_SECTORS (128)
+#define WIL_CID_ALL (0xff)
+
+enum qca_wlan_vendor_attr_rf_sector {
+ QCA_ATTR_MAC_ADDR = 6,
+ QCA_ATTR_PAD = 13,
+ QCA_ATTR_TSF = 29,
+ QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
+ QCA_ATTR_DMG_RF_MODULE_MASK = 32,
+ QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
+ QCA_ATTR_DMG_RF_SECTOR_MAX,
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_type {
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
+ QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
+ QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+
+ /* keep last */
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
+ QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
+};
+
+static const struct
+nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
+ [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
+ [QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
+ [QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
+};
+
+static const struct
+nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
+ [QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
+};
+
+enum qca_nl80211_vendor_subcmds {
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
+};
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+
+/* vendor specific commands */
+static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
+ .doit = wil_rf_sector_get_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
+ .doit = wil_rf_sector_set_cfg
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
+ .doit = wil_rf_sector_get_selected
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd =
+ QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
+ .doit = wil_rf_sector_set_selected
+ },
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_DEVICE] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+static const char * const key_usage_str[] = {
+ [WMI_KEY_USE_PAIRWISE] = "PTK",
+ [WMI_KEY_USE_RX_GROUP] = "RX_GTK",
+ [WMI_KEY_USE_TX_GROUP] = "TX_GTK",
+ [WMI_KEY_USE_STORE_PTK] = "STORE_PTK",
+ [WMI_KEY_USE_APPLY_PTK] = "APPLY_PTK",
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch)
+{
+ switch (spec_ch) {
+ case 1:
+ *wmi_ch = WMI_CHANNEL_1;
+ break;
+ case 2:
+ *wmi_ch = WMI_CHANNEL_2;
+ break;
+ case 3:
+ *wmi_ch = WMI_CHANNEL_3;
+ break;
+ case 4:
+ *wmi_ch = WMI_CHANNEL_4;
+ break;
+ case 5:
+ *wmi_ch = WMI_CHANNEL_5;
+ break;
+ case 6:
+ *wmi_ch = WMI_CHANNEL_6;
+ break;
+ case 9:
+ *wmi_ch = WMI_CHANNEL_9;
+ break;
+ case 10:
+ *wmi_ch = WMI_CHANNEL_10;
+ break;
+ case 11:
+ *wmi_ch = WMI_CHANNEL_11;
+ break;
+ case 12:
+ *wmi_ch = WMI_CHANNEL_12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch)
+{
+ switch (wmi_ch) {
+ case WMI_CHANNEL_1:
+ *spec_ch = 1;
+ break;
+ case WMI_CHANNEL_2:
+ *spec_ch = 2;
+ break;
+ case WMI_CHANNEL_3:
+ *spec_ch = 3;
+ break;
+ case WMI_CHANNEL_4:
+ *spec_ch = 4;
+ break;
+ case WMI_CHANNEL_5:
+ *spec_ch = 5;
+ break;
+ case WMI_CHANNEL_6:
+ *spec_ch = 6;
+ break;
+ case WMI_CHANNEL_9:
+ *spec_ch = 9;
+ break;
+ case WMI_CHANNEL_10:
+ *spec_ch = 10;
+ break;
+ case WMI_CHANNEL_11:
+ *spec_ch = 11;
+ break;
+ case WMI_CHANNEL_12:
+ *spec_ch = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
+ struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_notify_req_cmd cmd = {
+ .cid = cid,
+ .interval_usec = 0,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ int rc;
+ u8 tx_mcs, rx_mcs;
+ u8 tx_rate_flag = RATE_INFO_FLAGS_DMG;
+ u8 rx_rate_flag = RATE_INFO_FLAGS_DMG;
+
+ memset(&reply, 0, sizeof(reply));
+
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ tx_mcs = le16_to_cpu(reply.evt.bf_mcs);
+
+ wil_dbg_wmi(wil, "Link status for CID %d MID %d: {\n"
+ " MCS %s TSF 0x%016llx\n"
+ " BF status 0x%08x RSSI %d SQI %d%%\n"
+ " Tx Tpt %d goodput %d Rx goodput %d\n"
+ " Sectors(rx:tx) my %d:%d peer %d:%d\n"
+ " Tx mode %d}\n",
+ cid, vif->mid, WIL_EXTENDED_MCS_CHECK(tx_mcs),
+ le64_to_cpu(reply.evt.tsf), reply.evt.status,
+ reply.evt.rssi,
+ reply.evt.sqi,
+ le32_to_cpu(reply.evt.tx_tpt),
+ le32_to_cpu(reply.evt.tx_goodput),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector),
+ reply.evt.tx_mode);
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG) {
+ tx_rate_flag = RATE_INFO_FLAGS_EDMG;
+ rx_rate_flag = RATE_INFO_FLAGS_EDMG;
+ }
+
+ rx_mcs = stats->last_mcs_rx;
+
+ /* check extended MCS (12.1) and convert it into
+ * base MCS (7) + EXTENDED_SC_DMG flag
+ */
+ if (tx_mcs == WIL_EXTENDED_MCS_26) {
+ tx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
+ tx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
+ }
+ if (rx_mcs == WIL_EXTENDED_MCS_26) {
+ rx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
+ rx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
+ }
+
+ sinfo->txrate.flags = tx_rate_flag;
+ sinfo->rxrate.flags = rx_rate_flag;
+ sinfo->txrate.mcs = tx_mcs;
+ sinfo->rxrate.mcs = rx_mcs;
+
+ sinfo->txrate.n_bonded_ch =
+ wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
+ sinfo->rxrate.n_bonded_ch =
+ wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx);
+ sinfo->rx_bytes = stats->rx_bytes;
+ sinfo->rx_packets = stats->rx_packets;
+ sinfo->rx_dropped_misc = stats->rx_dropped;
+ sinfo->tx_bytes = stats->tx_bytes;
+ sinfo->tx_packets = stats->tx_packets;
+ sinfo->tx_failed = stats->tx_errors;
+
+ if (test_bit(wil_vif_fwconnected, vif->status)) {
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING,
+ wil->fw_capabilities))
+ sinfo->signal = reply.evt.rssi;
+ else
+ sinfo->signal = reply.evt.sqi;
+ }
+
+ return rc;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ int cid = wil_find_cid(wil, vif->mid, mac);
+
+ wil_dbg_misc(wil, "get_station: %pM CID %d MID %d\n", mac, cid,
+ vif->mid);
+ if (!wil_cid_valid(wil, cid))
+ return -ENOENT;
+
+ rc = wil_cid_fill_sinfo(vif, cid, sinfo);
+
+ return rc;
+}
+
+/*
+ * Find @idx-th active STA for specific MID for station dump.
+ */
+int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
+{
+ int i;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (wil->sta[i].mid != mid)
+ continue;
+ if (idx == 0)
+ return i;
+ idx--;
+ }
+
+ return -ENOENT;
+}
+
+static int wil_cfg80211_dump_station(struct wiphy *wiphy,
+ struct net_device *dev, int idx,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ int cid = wil_find_cid_by_idx(wil, vif->mid, idx);
+
+ if (!wil_cid_valid(wil, cid))
+ return -ENOENT;
+
+ ether_addr_copy(mac, wil->sta[cid].addr);
+ wil_dbg_misc(wil, "dump_station: %pM CID %d MID %d\n", mac, cid,
+ vif->mid);
+
+ rc = wil_cid_fill_sinfo(vif, cid, sinfo);
+
+ return rc;
+}
+
+static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "start_p2p_device: entered\n");
+ wil->p2p_dev_started = 1;
+ return 0;
+}
+
+static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (!wil->p2p_dev_started)
+ return;
+
+ wil_dbg_misc(wil, "stop_p2p_device: entered\n");
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->vif_mutex);
+ wil_p2p_stop_radio_operations(wil);
+ wil->p2p_dev_started = 0;
+ mutex_unlock(&wil->vif_mutex);
+ mutex_unlock(&wil->mutex);
+}
+
+static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil,
+ enum nl80211_iftype new_type)
+{
+ int i;
+ struct wireless_dev *wdev;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ if (wil->vifs[i]) {
+ wdev = vif_to_wdev(wil->vifs[i]);
+ params.iftype_num[wdev->iftype]++;
+ }
+ }
+ params.iftype_num[new_type]++;
+ return cfg80211_check_combinations(wil->wiphy, &params);
+}
+
+static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ enum nl80211_iftype new_type)
+{
+ int i, ret = 0;
+ struct wireless_dev *wdev;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
+ bool check_combos = false;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif_pos = wil->vifs[i];
+
+ if (vif_pos && vif != vif_pos) {
+ wdev = vif_to_wdev(vif_pos);
+ params.iftype_num[wdev->iftype]++;
+ check_combos = true;
+ }
+ }
+
+ if (check_combos) {
+ params.iftype_num[new_type]++;
+ ret = cfg80211_check_combinations(wil->wiphy, &params);
+ }
+ return ret;
+}
+
+static struct wireless_dev *
+wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct net_device *ndev_main = wil->main_ndev, *ndev;
+ struct wil6210_vif *vif;
+ struct wireless_dev *p2p_wdev, *wdev;
+ int rc;
+
+ wil_dbg_misc(wil, "add_iface, type %d\n", type);
+
+ /* P2P device is not a real virtual interface, it is a management-only
+ * interface that shares the main interface.
+ * Skip concurrency checks here.
+ */
+ if (type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (wil->p2p_wdev) {
+ wil_err(wil, "P2P_DEVICE interface already created\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
+ if (!p2p_wdev)
+ return ERR_PTR(-ENOMEM);
+
+ p2p_wdev->iftype = type;
+ p2p_wdev->wiphy = wiphy;
+ /* use our primary ethernet address */
+ ether_addr_copy(p2p_wdev->address, ndev_main->perm_addr);
+
+ wil->p2p_wdev = p2p_wdev;
+
+ return p2p_wdev;
+ }
+
+ if (!wil->wiphy->n_iface_combinations) {
+ wil_err(wil, "virtual interfaces not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = wil_cfg80211_validate_add_iface(wil, type);
+ if (rc) {
+ wil_err(wil, "iface validation failed, err=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ vif = wil_vif_alloc(wil, name, name_assign_type, type);
+ if (IS_ERR(vif))
+ return ERR_CAST(vif);
+
+ ndev = vif_to_ndev(vif);
+ ether_addr_copy(ndev->perm_addr, ndev_main->perm_addr);
+ if (is_valid_ether_addr(params->macaddr)) {
+ eth_hw_addr_set(ndev, params->macaddr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, ndev_main->perm_addr);
+ addr[0] = (addr[0] ^ (1 << vif->mid)) | 0x2; /* locally administered */
+ eth_hw_addr_set(ndev, addr);
+ }
+ wdev = vif_to_wdev(vif);
+ ether_addr_copy(wdev->address, ndev->dev_addr);
+
+ rc = wil_vif_add(wil, vif);
+ if (rc)
+ goto out;
+
+ wil_info(wil, "added VIF, mid %d iftype %d MAC %pM\n",
+ vif->mid, type, wdev->address);
+ return wdev;
+out:
+ wil_vif_free(vif);
+ return ERR_PTR(rc);
+}
+
+int wil_vif_prepare_stop(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct net_device *ndev;
+ int rc;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP)
+ return 0;
+
+ ndev = vif_to_ndev(vif);
+ if (netif_carrier_ok(ndev)) {
+ rc = wmi_pcp_stop(vif);
+ if (rc) {
+ wil_info(wil, "failed to stop AP, status %d\n",
+ rc);
+ /* continue */
+ }
+ wil_bcast_fini(vif);
+ netif_carrier_off(ndev);
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_del_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc;
+
+ wil_dbg_misc(wil, "del_iface\n");
+
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (wdev != wil->p2p_wdev) {
+ wil_err(wil, "delete of incorrect interface 0x%p\n",
+ wdev);
+ return -EINVAL;
+ }
+
+ wil_cfg80211_stop_p2p_device(wiphy, wdev);
+ wil_p2p_wdev_free(wil);
+ return 0;
+ }
+
+ if (vif->mid == 0) {
+ wil_err(wil, "cannot remove the main interface\n");
+ return -EINVAL;
+ }
+
+ rc = wil_vif_prepare_stop(vif);
+ if (rc)
+ goto out;
+
+ wil_info(wil, "deleted VIF, mid %d iftype %d MAC %pM\n",
+ vif->mid, wdev->iftype, wdev->address);
+
+ wil_vif_remove(wil, vif->mid);
+out:
+ return rc;
+}
+
+static bool wil_is_safe_switch(enum nl80211_iftype from,
+ enum nl80211_iftype to)
+{
+ if (from == NL80211_IFTYPE_STATION &&
+ to == NL80211_IFTYPE_P2P_CLIENT)
+ return true;
+
+ return false;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ int rc;
+ bool fw_reset = false;
+
+ wil_dbg_misc(wil, "change_iface: type=%d\n", type);
+
+ if (wiphy->n_iface_combinations) {
+ rc = wil_cfg80211_validate_change_iface(wil, vif, type);
+ if (rc) {
+ wil_err(wil, "iface validation failed, err=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* do not reset FW when there are active VIFs,
+ * because it can cause significant disruption
+ */
+ if (!wil_has_other_active_ifaces(wil, ndev, true, false) &&
+ netif_running(ndev) && !wil_is_recovery_blocked(wil) &&
+ !wil_is_safe_switch(wdev->iftype, type)) {
+ wil_dbg_misc(wil, "interface is up. resetting...\n");
+ mutex_lock(&wil->mutex);
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (rc)
+ return rc;
+ fw_reset = true;
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (params->flags)
+ wil->monitor_flags = params->flags;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (vif->mid != 0 && wil_has_active_ifaces(wil, true, false)) {
+ if (!fw_reset)
+ wil_vif_prepare_stop(vif);
+ rc = wmi_port_delete(wil, vif->mid);
+ if (rc)
+ return rc;
+ rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr, type);
+ if (rc)
+ return rc;
+ }
+
+ wdev->iftype = type;
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = request->wdev;
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+ int rc;
+
+ wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
+
+ /* scan is supported on client interfaces and on AP interface */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* FW doesn't support scan after a connection attempt */
+ if (test_bit(wil_status_dontscan, wil->status)) {
+ wil_err(wil, "Can't scan now\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&wil->mutex);
+
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request || vif->p2p.discovery_started) {
+ wil_err(wil, "Already scanning\n");
+ mutex_unlock(&wil->vif_mutex);
+ rc = -EAGAIN;
+ goto out;
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (!wil->p2p_dev_started) {
+ wil_err(wil, "P2P search requested on stopped P2P device\n");
+ rc = -EIO;
+ goto out;
+ }
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wil_p2p_is_social_scan(request)) {
+ vif->scan_request = request;
+ if (vif->mid == 0)
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(vif, request);
+ if (rc) {
+ if (vif->mid == 0)
+ wil->radio_wdev =
+ wil->main_ndev->ieee80211_ptr;
+ vif->scan_request = NULL;
+ }
+ goto out;
+ }
+ }
+
+ (void)wil_p2p_stop_discovery(vif);
+
+ wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
+ wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
+
+ for (i = 0; i < request->n_ssids; i++) {
+ wil_dbg_misc(wil, "SSID[%d]", i);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
+ }
+
+ if (request->n_ssids)
+ rc = wmi_set_ssid(vif, request->ssids[0].ssid_len,
+ request->ssids[0].ssid);
+ else
+ rc = wmi_set_ssid(vif, 0, NULL);
+
+ if (rc) {
+ wil_err(wil, "set SSID for scan request failed: %d\n", rc);
+ goto out;
+ }
+
+ vif->scan_request = request;
+ mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
+ cmd.cmd.num_channels = 0;
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ if (request->ie_len)
+ wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ie, request->ie_len, true);
+ else
+ wil_dbg_misc(wil, "Scan has no IE's\n");
+
+ rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
+ request->ie_len, request->ie);
+ if (rc)
+ goto out_restore;
+
+ if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
+ cmd.cmd.discovery_mode = 1;
+ wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
+ }
+
+ if (vif->mid == 0)
+ wil->radio_wdev = wdev;
+ rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid,
+ &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+
+out_restore:
+ if (rc) {
+ del_timer_sync(&vif->scan_timer);
+ if (vif->mid == 0)
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ vif->scan_request = NULL;
+ }
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+static void wil_cfg80211_abort_scan(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+
+ wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
+
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->vif_mutex);
+
+ if (!vif->scan_request)
+ goto out;
+
+ if (wdev != vif->scan_request->wdev) {
+ wil_dbg_misc(wil, "abort scan was called on the wrong iface\n");
+ goto out;
+ }
+
+ if (wdev == wil->p2p_wdev && wil->radio_wdev == wil->p2p_wdev)
+ wil_p2p_stop_radio_operations(wil);
+ else
+ wil_abort_scan(vif, true);
+
+out:
+ mutex_unlock(&wil->vif_mutex);
+ mutex_unlock(&wil->mutex);
+}
+
+static void wil_print_crypto(struct wil6210_priv *wil,
+ struct cfg80211_crypto_settings *c)
+{
+ int i, n;
+
+ wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
+ c->wpa_versions, c->cipher_group);
+ wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
+ n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->ciphers_pairwise[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
+ n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->akm_suites[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
+ c->control_port, be16_to_cpu(c->control_port_ethertype),
+ c->control_port_no_encrypt);
+}
+
+static const char *
+wil_get_auth_type_name(enum nl80211_auth_type auth_type)
+{
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ return "OPEN_SYSTEM";
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ return "SHARED_KEY";
+ case NL80211_AUTHTYPE_FT:
+ return "FT";
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ return "NETWORK_EAP";
+ case NL80211_AUTHTYPE_SAE:
+ return "SAE";
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ return "AUTOMATIC";
+ default:
+ return "unknown";
+ }
+}
+
+static void wil_print_connect_params(struct wil6210_priv *wil,
+ struct cfg80211_connect_params *sme)
+{
+ wil_info(wil, "Connecting to:\n");
+ if (sme->channel) {
+ wil_info(wil, " Channel: %d freq %d\n",
+ sme->channel->hw_value, sme->channel->center_freq);
+ }
+ if (sme->bssid)
+ wil_info(wil, " BSSID: %pM\n", sme->bssid);
+ if (sme->ssid)
+ print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
+ 16, 1, sme->ssid, sme->ssid_len, true);
+ if (sme->prev_bssid)
+ wil_info(wil, " Previous BSSID=%pM\n", sme->prev_bssid);
+ wil_info(wil, " Auth Type: %s\n",
+ wil_get_auth_type_name(sme->auth_type));
+ wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
+ wil_info(wil, " PBSS: %d\n", sme->pbss);
+ wil_print_crypto(wil, &sme->crypto);
+}
+
+static int wil_ft_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wmi_ft_auth_cmd auth_cmd;
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+ wil_err(wil, "FT: FW does not support FT roaming\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!sme->prev_bssid) {
+ wil_err(wil, "FT: prev_bssid was not set\n");
+ return -EINVAL;
+ }
+
+ if (ether_addr_equal(sme->prev_bssid, sme->bssid)) {
+ wil_err(wil, "FT: can not roam to same AP\n");
+ return -EINVAL;
+ }
+
+ if (!test_bit(wil_vif_fwconnected, vif->status)) {
+ wil_err(wil, "FT: roam while not connected\n");
+ return -EINVAL;
+ }
+
+ if (vif->privacy != sme->privacy) {
+ wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n",
+ vif->privacy, sme->privacy);
+ return -EINVAL;
+ }
+
+ if (sme->pbss) {
+ wil_err(wil, "FT: roam is not valid for PBSS\n");
+ return -EINVAL;
+ }
+
+ memset(&auth_cmd, 0, sizeof(auth_cmd));
+ auth_cmd.channel = sme->channel->hw_value - 1;
+ ether_addr_copy(auth_cmd.bssid, sme->bssid);
+
+ wil_info(wil, "FT: roaming\n");
+
+ set_bit(wil_vif_ft_roam, vif->status);
+ rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid,
+ &auth_cmd, sizeof(auth_cmd));
+ if (rc == 0)
+ mod_timer(&vif->connect_timer,
+ jiffies + msecs_to_jiffies(5000));
+ else
+ clear_bit(wil_vif_ft_roam, vif->status);
+
+ return rc;
+}
+
+static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config,
+ u8 edmg_channels, u8 *wmi_ch)
+{
+ if (!edmg_bw_config) {
+ *wmi_ch = 0;
+ return 0;
+ } else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) {
+ /* convert from edmg channel bitmap into edmg channel number */
+ switch (edmg_channels) {
+ case WIL_EDMG_CHANNEL_9_SUBCHANNELS:
+ return wil_spec2wmi_ch(9, wmi_ch);
+ case WIL_EDMG_CHANNEL_10_SUBCHANNELS:
+ return wil_spec2wmi_ch(10, wmi_ch);
+ case WIL_EDMG_CHANNEL_11_SUBCHANNELS:
+ return wil_spec2wmi_ch(11, wmi_ch);
+ default:
+ wil_err(wil, "Unsupported edmg channel bitmap 0x%x\n",
+ edmg_channels);
+ return -EINVAL;
+ }
+ } else {
+ wil_err(wil, "Unsupported EDMG BW configuration %d\n",
+ edmg_bw_config);
+ return -EINVAL;
+ }
+}
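+/* Example (informational): a connect request with edmg.bw_config = 5
+ * (WIL_EDMG_BW_CONFIGURATION) and edmg.channels = BIT(1) | BIT(2)
+ * matches WIL_EDMG_CHANNEL_10_SUBCHANNELS, so the function returns the
+ * WMI encoding of EDMG channel 10 (two bonded 2.16 GHz channels) via
+ * wil_spec2wmi_ch(10, wmi_ch).
+ */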
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+ bool is_ft_roam = false;
+ u8 network_type;
+ enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
+
+ wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid);
+ wil_print_connect_params(wil, sme);
+
+ if (sme->auth_type == NL80211_AUTHTYPE_FT)
+ is_ft_roam = true;
+ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC &&
+ test_bit(wil_vif_fwconnected, vif->status))
+ is_ft_roam = true;
+
+ if (!is_ft_roam)
+ if (test_bit(wil_vif_fwconnecting, vif->status) ||
+ test_bit(wil_vif_fwconnected, vif->status))
+ return -EALREADY;
+
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
+ return -ERANGE;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+ if (sme->privacy && !rsn_eid) {
+ wil_info(wil, "WSC connection\n");
+ if (is_ft_roam) {
+ wil_err(wil, "No WSC with FT roam\n");
+ return -EINVAL;
+ }
+ }
+
+ if (sme->pbss)
+ bss_type = IEEE80211_BSS_TYPE_PBSS;
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ bss_type, IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+ vif->privacy = sme->privacy;
+ vif->pbss = sme->pbss;
+
+ rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc)
+ goto out;
+
+ switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (is_ft_roam) {
+ if (network_type != WMI_NETTYPE_INFRA) {
+ wil_err(wil, "FT: Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = wil_ft_connect(wiphy, ndev, sme);
+ if (rc == 0)
+ vif->bss = bss;
+ goto out;
+ }
+
+ if (vif->privacy) {
+ /* For secure assoc, remove old keys */
+ rc = wmi_del_cipher_key(vif, 0, bss->bssid,
+ WMI_KEY_USE_PAIRWISE);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+ goto out;
+ }
+ rc = wmi_del_cipher_key(vif, 0, bss->bssid,
+ WMI_KEY_USE_RX_GROUP);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
+ goto out;
+ }
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
+ conn.network_type = network_type;
+ if (vif->privacy) {
+ if (rsn_eid) { /* regular secure connection */
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.group_crypto_len = 16;
+ } else { /* WSC */
+ conn.dot11_auth_mode = WMI_AUTH11_WSC;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+ } else { /* insecure connection */
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+ conn.channel = ch - 1;
+
+ rc = wil_get_wmi_edmg_channel(wil, sme->edmg.bw_config,
+ sme->edmg.channels, &conn.edmg_channel);
+ if (rc < 0)
+ return rc;
+
+ ether_addr_copy(conn.bssid, bss->bssid);
+ ether_addr_copy(conn.dst_mac, bss->bssid);
+
+ set_bit(wil_vif_fwconnecting, vif->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, vif->mid, &conn, sizeof(conn));
+ if (rc == 0) {
+ netif_carrier_on(ndev);
+ if (!wil_has_other_active_ifaces(wil, ndev, false, true))
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+ vif->bss = bss;
+ /* Connect can take lots of time */
+ mod_timer(&vif->connect_timer,
+ jiffies + msecs_to_jiffies(5000));
+ } else {
+ clear_bit(wil_vif_fwconnecting, vif->status);
+ }
+
+ out:
+ cfg80211_put_bss(wiphy, bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+
+ wil_dbg_misc(wil, "disconnect: reason=%d, mid=%d\n",
+ reason_code, vif->mid);
+
+ if (!(test_bit(wil_vif_fwconnecting, vif->status) ||
+ test_bit(wil_vif_fwconnected, vif->status))) {
+ wil_err(wil, "Disconnect was called while disconnected\n");
+ return 0;
+ }
+
+ vif->locally_generated_disc = true;
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0,
+ WMI_DISCONNECT_EVENTID, NULL, 0,
+ WIL6210_DISCONNECT_TO_MS);
+ if (rc)
+ wil_err(wil, "disconnect error %d\n", rc);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* these parameters are explicitly not supported */
+ if (changed & (WIPHY_PARAM_RETRY_LONG |
+ WIPHY_PARAM_FRAG_THRESHOLD |
+ WIPHY_PARAM_RTS_THRESHOLD))
+ return -ENOTSUPP;
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT) {
+ rc = wmi_set_mgmt_retry(wil, wiphy->retry_short);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
+{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc;
+ bool tx_status;
+
+ wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n",
+ params->chan ? params->chan->hw_value : -1,
+ params->offchan,
+ params->wait);
+
+ /* Note: currently we support the "wait" parameter only in AP mode.
+ * In other modes, user-space must call remain_on_channel before
+ * mgmt_tx, or listen on a channel other than the active one.
+ */
+
+ if (params->chan && params->chan->hw_value == 0) {
+ wil_err(wil, "invalid channel\n");
+ return -EINVAL;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_AP) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (!params->chan || params->chan->hw_value == vif->channel) {
+ wil_dbg_misc(wil,
+ "send WMI_SW_TX_REQ_CMDID for on-channel\n");
+ rc = wmi_mgmt_tx(vif, buf, len);
+ goto out;
+ }
+
+ if (params->offchan == 0) {
+ wil_err(wil,
+ "invalid channel params: current %d requested %d, off-channel not allowed\n",
+ vif->channel, params->chan->hw_value);
+ return -EBUSY;
+ }
+
+ /* use the wmi_mgmt_tx_ext only on AP mode and off-channel */
+ rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value,
+ params->wait);
+
+out:
+ /* When the sent packet was not acked by the receiver (ACK=0), rc will
+ * be -EAGAIN. In this case this function needs to return success;
+ * the ACK=0 will be reflected in tx_status.
+ */
+ tx_status = (rc == 0);
+ rc = (rc == -EAGAIN) ? 0 : rc;
+ cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
+ tx_status, GFP_KERNEL);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil->monitor_chandef = *chandef;
+
+ return 0;
+}
+
+static enum wmi_key_usage wil_detect_key_usage(struct wireless_dev *wdev,
+ bool pairwise)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ enum wmi_key_usage rc;
+
+ if (pairwise) {
+ rc = WMI_KEY_USE_PAIRWISE;
+ } else {
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ rc = WMI_KEY_USE_TX_GROUP;
+ break;
+ default:
+ /* TODO: Rx GTK or Tx GTK? */
+ wil_err(wil, "Can't determine GTK type\n");
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ }
+ }
+ wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
+
+ return rc;
+}
+
+static struct wil_sta_info *
+wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
+{
+ int cid = -EINVAL;
+
+ if (key_usage == WMI_KEY_USE_TX_GROUP)
+ return NULL; /* not needed */
+
+ /* supplicant provides Rx group key in STA mode with NULL MAC address */
+ if (mac_addr)
+ cid = wil_find_cid(wil, mid, mac_addr);
+ else if (key_usage == WMI_KEY_USE_RX_GROUP)
+ cid = wil_find_cid_by_idx(wil, mid, 0);
+ if (cid < 0) {
+ wil_err(wil, "No CID for %pM %s\n", mac_addr,
+ key_usage_str[key_usage]);
+ return ERR_PTR(cid);
+ }
+
+ return &wil->sta[cid];
+}
+
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_STORE_PTK:
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq,
+ IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ cc->key_set = false;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ cc->key_set = false;
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev, int link_id,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ int rc;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid,
+ key_usage,
+ mac_addr);
+
+ if (!params) {
+ wil_err(wil, "NULL params\n");
+ return -EINVAL;
+ }
+
+ wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+
+ if (IS_ERR(cs)) {
+ /* in FT, sta info may not be available as add_key may be
+ * sent by host before FW sends WMI_CONNECT_EVENT
+ */
+ if (!test_bit(wil_vif_ft_roam, vif->status)) {
+ wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
+ } else {
+ wil_del_rx_key(key_index, key_usage, cs);
+ }
+
+ if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
+ wil_err(wil,
+ "Wrong PN len %d, %pM %s[%d] PN %*phN\n",
+ params->seq_len, mac_addr,
+ key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&wil->eap_lock);
+ if (pairwise && wdev->iftype == NL80211_IFTYPE_STATION &&
+ (vif->ptk_rekey_state == WIL_REKEY_M3_RECEIVED ||
+ vif->ptk_rekey_state == WIL_REKEY_WAIT_M4_SENT)) {
+ key_usage = WMI_KEY_USE_STORE_PTK;
+ vif->ptk_rekey_state = WIL_REKEY_WAIT_M4_SENT;
+ wil_dbg_misc(wil, "Store EAPOL key\n");
+ }
+ spin_unlock_bh(&wil->eap_lock);
+
+ rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
+ params->key, key_usage);
+ if (!rc && !IS_ERR(cs)) {
+ /* update local storage used for AP recovery */
+ if (key_usage == WMI_KEY_USE_TX_GROUP && params->key &&
+ params->key_len <= WMI_MAX_KEY_LEN) {
+ vif->gtk_index = key_index;
+ memcpy(vif->gtk, params->key, params->key_len);
+ vif->gtk_len = params->key_len;
+ }
+ /* in FT set crypto will take place upon receiving
+ * WMI_RING_EN_EVENTID event
+ */
+ wil_set_crypto_rx(key_index, key_usage, cs, params);
+ }
+
+ return rc;
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev, int link_id,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid,
+ key_usage,
+ mac_addr);
+
+ wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
+ key_usage_str[key_usage], key_index);
+
+ if (IS_ERR(cs))
+ wil_info(wil, "Not connected, %pM %s[%d]\n",
+ mac_addr, key_usage_str[key_usage], key_index);
+
+ if (!IS_ERR_OR_NULL(cs))
+ wil_del_rx_key(key_index, key_usage, cs);
+
+ return wmi_del_cipher_key(vif, key_index, mac_addr, key_usage);
+}
+
+/* Need to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev, int link_id,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "set_default_key: entered\n");
+ return 0;
+}
+
+static int wil_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_dbg_misc(wil,
+ "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
+ chan->center_freq, duration, wdev->iftype);
+
+ rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
+ return rc;
+}
+
+static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+
+ wil_dbg_misc(wil, "cancel_remain_on_channel\n");
+
+ return wil_p2p_cancel_listen(vif, cookie);
+}
+
+/*
+ * Find a specific IE in a list of IEs.
+ * Return a pointer to the beginning of the IE in the list,
+ * or NULL if not found.
+ */
+static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
+ u16 ie_len)
+{
+ struct ieee80211_vendor_ie *vie;
+ u32 oui;
+
+ /* IE tag at offset 0, length at offset 1 */
+ if (ie_len < 2 || 2 + ie[1] > ie_len)
+ return NULL;
+
+ if (ie[0] != WLAN_EID_VENDOR_SPECIFIC)
+ return cfg80211_find_ie(ie[0], ies, ies_len);
+
+ /* make sure there is room for 3 bytes OUI + 1 byte OUI type */
+ if (ie[1] < 4)
+ return NULL;
+ vie = (struct ieee80211_vendor_ie *)ie;
+ oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2];
+ return cfg80211_find_vendor_ie(oui, vie->oui_type, ies,
+ ies_len);
+}
+
+/*
+ * Merge the IEs from two lists into a single list.
+ * Do not include IEs from the second list that already exist in the
+ * first list. Only vendor-specific IEs are taken from the second list,
+ * to keep the merged list sorted (the vendor-specific IE has the
+ * highest tag number).
+ * The caller must free the allocated memory for the merged IEs.
+ */
+static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
+ const u8 *ies2, u16 ies2_len,
+ u8 **merged_ies, u16 *merged_len)
+{
+ u8 *buf, *dpos;
+ const u8 *spos;
+
+ if (!ies1)
+ ies1_len = 0;
+
+ if (!ies2)
+ ies2_len = 0;
+
+ if (ies1_len == 0 && ies2_len == 0) {
+ *merged_ies = NULL;
+ *merged_len = 0;
+ return 0;
+ }
+
+ buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (ies1)
+ memcpy(buf, ies1, ies1_len);
+ dpos = buf + ies1_len;
+ spos = ies2;
+ while (spos && (spos + 1 < ies2 + ies2_len)) {
+ /* IE tag at offset 0, length at offset 1 */
+ u16 ielen = 2 + spos[1];
+
+ if (spos + ielen > ies2 + ies2_len)
+ break;
+ if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
+ spos, ielen))) {
+ memcpy(dpos, spos, ielen);
+ dpos += ielen;
+ }
+ spos += ielen;
+ }
+
+ *merged_ies = buf;
+ *merged_len = dpos - buf;
+ return 0;
+}
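+/* Example (informational): with
+ *   ies1 = [SSID, RSN, Vendor(OUI A)]
+ *   ies2 = [SSID, Vendor(OUI A), Vendor(OUI B)]
+ * the merged list is [SSID, RSN, Vendor(OUI A), Vendor(OUI B)]:
+ * ies1 is copied verbatim, non-vendor IEs from ies2 are dropped, and
+ * only vendor-specific IEs from ies2 that are not already present in
+ * ies1 are appended at the end.
+ */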
+
+static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
+{
+ wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->head, b->head_len, true);
+ wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->tail, b->tail_len, true);
+ wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->beacon_ies, b->beacon_ies_len, true);
+ wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->probe_resp, b->probe_resp_len, true);
+ wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->proberesp_ies, b->proberesp_ies_len, true);
+ wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->assocresp_ies, b->assocresp_ies_len, true);
+}
+
+/* internal functions for device reset and starting AP */
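+/* return a pointer to the variable IEs of a probe response frame,
+ * skipping the fixed part (management header, timestamp, beacon
+ * interval and capability fields)
+ */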
+static u8 *
+_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len,
+ u16 *ies_len)
+{
+ u8 *ies = NULL;
+
+ if (proberesp) {
+ struct ieee80211_mgmt *f =
+ (struct ieee80211_mgmt *)proberesp;
+ size_t hlen = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+
+ ies = f->u.probe_resp.variable;
+ if (ies_len)
+ *ies_len = proberesp_len - hlen;
+ }
+
+ return ies;
+}
+
+static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
+ struct cfg80211_beacon_data *bcon)
+{
+ int rc;
+ u16 len = 0, proberesp_len = 0;
+ u8 *ies = NULL, *proberesp;
+
+ /* update local storage used for AP recovery */
+ wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, bcon->probe_resp,
+ bcon->probe_resp_len);
+ wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len,
+ bcon->proberesp_ies, bcon->proberesp_ies_len);
+ wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len,
+ bcon->assocresp_ies, bcon->assocresp_ies_len);
+
+ proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+ bcon->probe_resp_len,
+ &proberesp_len);
+ rc = _wil_cfg80211_merge_extra_ies(proberesp,
+ proberesp_len,
+ bcon->proberesp_ies,
+ bcon->proberesp_ies_len,
+ &ies, &len);
+
+ if (rc)
+ goto out;
+
+ rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP, len, ies);
+ if (rc)
+ goto out;
+
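+	/* if user space did not provide association response IEs,
+	 * reuse the merged probe response IEs for the association response
+	 */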
+ if (bcon->assocresp_ies)
+ rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP,
+ bcon->assocresp_ies_len, bcon->assocresp_ies);
+ else
+ rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP, len, ies);
+#if 0 /* to use beacon IEs, remove this #if 0 */
+ if (rc)
+ goto out;
+
+ rc = wmi_set_ie(vif, WMI_FRAME_BEACON,
+ bcon->tail_len, bcon->tail);
+#endif
+out:
+ kfree(ies);
+ return rc;
+}
+
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *ssid, size_t ssid_len, u32 privacy,
+ int bi, u8 chan, u8 wmi_edmg_channel,
+ struct cfg80211_beacon_data *bcon,
+ u8 hidden_ssid, u32 pbss)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+ u16 proberesp_len = 0;
+ u8 *proberesp;
+ bool ft = false;
+
+ if (pbss)
+ wmi_nettype = WMI_NETTYPE_P2P;
+
+ wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
+ if (is_go && !pbss) {
+ wil_err(wil, "P2P GO must be in PBSS\n");
+ return -ENOTSUPP;
+ }
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+
+ proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+ bcon->probe_resp_len,
+ &proberesp_len);
+	/* check whether the probe response IEs include a Mobility Domain element (MDE) */
+ if ((proberesp && proberesp_len > 0 &&
+ cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN,
+ proberesp,
+ proberesp_len)))
+ ft = true;
+
+ if (ft) {
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING,
+ wil->fw_capabilities)) {
+ wil_err(wil, "FW does not support FT roaming\n");
+ return -ENOTSUPP;
+ }
+ set_bit(wil_vif_ft_roam, vif->status);
+ }
+
+ mutex_lock(&wil->mutex);
+
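+	/* take the device through a full down/up cycle only when
+	 * no other interface is active
+	 */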
+ if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ if (rc)
+ goto out;
+ }
+
+ rc = wmi_set_ssid(vif, ssid_len, ssid);
+ if (rc)
+ goto out;
+
+ rc = _wil_cfg80211_set_ies(vif, bcon);
+ if (rc)
+ goto out;
+
+ vif->privacy = privacy;
+ vif->channel = chan;
+ vif->wmi_edmg_channel = wmi_edmg_channel;
+ vif->hidden_ssid = hidden_ssid;
+ vif->pbss = pbss;
+ vif->bi = bi;
+ memcpy(vif->ssid, ssid, ssid_len);
+ vif->ssid_len = ssid_len;
+
+ netif_carrier_on(ndev);
+ if (!wil_has_other_active_ifaces(wil, ndev, false, true))
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+
+ rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel,
+ hidden_ssid, is_go);
+ if (rc)
+ goto err_pcp_start;
+
+ rc = wil_bcast_init(vif);
+ if (rc)
+ goto err_bcast;
+
+ goto out; /* success */
+
+err_bcast:
+ wmi_pcp_stop(vif);
+err_pcp_start:
+ netif_carrier_off(ndev);
+ if (!wil_has_other_active_ifaces(wil, ndev, false, true))
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
+{
+ int rc, i;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+ struct net_device *ndev;
+ struct cfg80211_beacon_data bcon = {};
+ struct key_params key_params = {};
+
+ if (!vif || vif->ssid_len == 0)
+ continue;
+
+ ndev = vif_to_ndev(vif);
+ bcon.proberesp_ies = vif->proberesp_ies;
+ bcon.assocresp_ies = vif->assocresp_ies;
+ bcon.probe_resp = vif->proberesp;
+ bcon.proberesp_ies_len = vif->proberesp_ies_len;
+ bcon.assocresp_ies_len = vif->assocresp_ies_len;
+ bcon.probe_resp_len = vif->proberesp_len;
+
+ wil_info(wil,
+ "AP (vif %d) recovery: privacy %d, bi %d, channel %d, hidden %d, pbss %d\n",
+ i, vif->privacy, vif->bi, vif->channel,
+ vif->hidden_ssid, vif->pbss);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ vif->ssid, vif->ssid_len, true);
+ rc = _wil_cfg80211_start_ap(wiphy, ndev,
+ vif->ssid, vif->ssid_len,
+ vif->privacy, vif->bi,
+ vif->channel,
+ vif->wmi_edmg_channel, &bcon,
+ vif->hidden_ssid, vif->pbss);
+ if (rc) {
+ wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
+ continue;
+ }
+
+ if (!vif->privacy || vif->gtk_len == 0)
+ continue;
+
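+		/* re-install the saved GTK so the recovered AP resumes
+		 * protected group traffic
+		 */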
+ key_params.key = vif->gtk;
+ key_params.key_len = vif->gtk_len;
+ key_params.seq_len = IEEE80211_GCMP_PN_LEN;
+ rc = wil_cfg80211_add_key(wiphy, ndev, -1, vif->gtk_index,
+ false, NULL, &key_params);
+ if (rc)
+ wil_err(wil, "vif %d recovery add key failed (%d)\n",
+ i, rc);
+ }
+}
+
+static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_beacon_data *bcon)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ u32 privacy = 0;
+
+ wil_dbg_misc(wil, "change_beacon, mid=%d\n", vif->mid);
+ wil_print_bcon_data(bcon);
+
+ if (bcon->tail &&
+ cfg80211_find_ie(WLAN_EID_RSN, bcon->tail,
+ bcon->tail_len))
+ privacy = 1;
+
+ memcpy(vif->ssid, wdev->u.ap.ssid, wdev->u.ap.ssid_len);
+ vif->ssid_len = wdev->u.ap.ssid_len;
+
+ /* in case privacy has changed, need to restart the AP */
+ if (vif->privacy != privacy) {
+ wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+ vif->privacy, privacy);
+
+ rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
+ vif->ssid_len, privacy,
+ wdev->links[0].ap.beacon_interval,
+ vif->channel,
+ vif->wmi_edmg_channel, bcon,
+ vif->hidden_ssid,
+ vif->pbss);
+ } else {
+ rc = _wil_cfg80211_set_ies(vif, bcon);
+ }
+
+ return rc;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ struct cfg80211_crypto_settings *crypto = &info->crypto;
+ u8 wmi_edmg_channel;
+ u8 hidden_ssid;
+
+ wil_dbg_misc(wil, "start_ap\n");
+
+ rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config,
+ info->chandef.edmg.channels,
+ &wmi_edmg_channel);
+ if (rc < 0)
+ return rc;
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ switch (info->hidden_ssid) {
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+ break;
+
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+ break;
+
+ default:
+ wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+ return -EOPNOTSUPP;
+ }
+ wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
+ info->privacy, info->auth_type);
+ wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
+ info->hidden_ssid);
+ wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
+ info->dtim_period);
+ wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ info->ssid, info->ssid_len, true);
+ wil_print_bcon_data(bcon);
+ wil_print_crypto(wil, crypto);
+
+ rc = _wil_cfg80211_start_ap(wiphy, ndev,
+ info->ssid, info->ssid_len, info->privacy,
+ info->beacon_interval, channel->hw_value,
+ wmi_edmg_channel, bcon, hidden_ssid,
+ info->pbss);
+
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ unsigned int link_id)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ bool last;
+
+ wil_dbg_misc(wil, "stop_ap, mid=%d\n", vif->mid);
+
+ netif_carrier_off(ndev);
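+	/* when this is the last active interface, drop the bus bandwidth
+	 * request and prepare for a full device down
+	 */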
+ last = !wil_has_other_active_ifaces(wil, ndev, false, true);
+ if (last) {
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ set_bit(wil_status_resetting, wil->status);
+ }
+
+ mutex_lock(&wil->mutex);
+
+ wmi_pcp_stop(vif);
+ clear_bit(wil_vif_ft_roam, vif->status);
+ vif->ssid_len = 0;
+ wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, NULL, 0);
+ wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, NULL, 0);
+ wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0);
+ memset(vif->gtk, 0, WMI_MAX_KEY_LEN);
+ vif->gtk_len = 0;
+
+ if (last)
+ __wil_down(wil);
+ else
+ wil_bcast_fini(vif);
+
+ mutex_unlock(&wil->mutex);
+
+ return 0;
+}
+
+static int wil_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n",
+ mac, params->aid, vif->mid,
+ params->sta_flags_mask, params->sta_flags_set);
+
+ if (!disable_ap_sme) {
+ wil_err(wil, "not supported with AP SME enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (params->aid > WIL_MAX_DMG_AID) {
+ wil_err(wil, "invalid aid\n");
+ return -EINVAL;
+ }
+
+ return wmi_new_sta(vif, mac, params->aid);
+}
+
+static int wil_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct station_del_parameters *params)
+{
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "del_station: %pM, reason=%d mid=%d\n",
+ params->mac, params->reason_code, vif->mid);
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(vif, params->mac, params->reason_code);
+ mutex_unlock(&wil->mutex);
+
+ return 0;
+}
+
+static int wil_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int authorize;
+ int cid, i;
+ struct wil_ring_tx_data *txdata = NULL;
+
+ wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
+ mac, params->sta_flags_mask, params->sta_flags_set,
+ vif->mid);
+
+ if (!disable_ap_sme) {
+ wil_dbg_misc(wil, "not supported with AP SME enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return 0;
+
+ cid = wil_find_cid(wil, vif->mid, mac);
+ if (cid < 0) {
+ wil_err(wil, "station not found\n");
+ return -ENOLINK;
+ }
+
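+	/* find the TX ring that is mapped to this CID (TID 0 entry) */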
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
+ if (wil->ring2cid_tid[i][0] == cid) {
+ txdata = &wil->ring_tx_data[i];
+ break;
+ }
+
+ if (!txdata) {
+ wil_err(wil, "ring data not found\n");
+ return -ENOLINK;
+ }
+
+ authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
+ txdata->dot1x_open = authorize ? 1 : 0;
+ wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
+ txdata->dot1x_open);
+
+ return 0;
+}
+
+/* probe_client handling */
+static void wil_probe_client_handle(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_probe_client_req *req)
+{
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wil_sta_info *sta = &wil->sta[req->cid];
+	/* assume the STA is alive if it is still connected;
+	 * otherwise the FW will disconnect it
+ */
+ bool alive = (sta->status == wil_sta_connected);
+
+ cfg80211_probe_status(ndev, sta->addr, req->cookie, alive,
+ 0, false, GFP_KERNEL);
+}
+
+static struct list_head *next_probe_client(struct wil6210_vif *vif)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&vif->probe_client_mutex);
+
+ if (!list_empty(&vif->probe_client_pending)) {
+ ret = vif->probe_client_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&vif->probe_client_mutex);
+
+ return ret;
+}
+
+void wil_probe_client_worker(struct work_struct *work)
+{
+ struct wil6210_vif *vif = container_of(work, struct wil6210_vif,
+ probe_client_worker);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_probe_client_req *req;
+ struct list_head *lh;
+
+ while ((lh = next_probe_client(vif)) != NULL) {
+ req = list_entry(lh, struct wil_probe_client_req, list);
+
+ wil_probe_client_handle(wil, vif, req);
+ kfree(req);
+ }
+}
+
+void wil_probe_client_flush(struct wil6210_vif *vif)
+{
+ struct wil_probe_client_req *req, *t;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_misc(wil, "probe_client_flush\n");
+
+ mutex_lock(&vif->probe_client_mutex);
+
+ list_for_each_entry_safe(req, t, &vif->probe_client_pending, list) {
+ list_del(&req->list);
+ kfree(req);
+ }
+
+ mutex_unlock(&vif->probe_client_mutex);
+}
+
+static int wil_cfg80211_probe_client(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *peer, u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct wil_probe_client_req *req;
+ int cid = wil_find_cid(wil, vif->mid, peer);
+
+ wil_dbg_misc(wil, "probe_client: %pM => CID %d MID %d\n",
+ peer, cid, vif->mid);
+
+ if (cid < 0)
+ return -ENOLINK;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->cid = cid;
+ req->cookie = cid;
+
+ mutex_lock(&vif->probe_client_mutex);
+ list_add_tail(&req->list, &vif->probe_client_pending);
+ mutex_unlock(&vif->probe_client_mutex);
+
+ *cookie = req->cookie;
+ queue_work(wil->wq_service, &vif->probe_client_worker);
+ return 0;
+}
+
+static int wil_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+
+ if (params->ap_isolate >= 0) {
+ wil_dbg_misc(wil, "change_bss: ap_isolate MID %d, %d => %d\n",
+ vif->mid, vif->ap_isolate, params->ap_isolate);
+ vif->ap_isolate = params->ap_isolate;
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_ps_profile_type ps_profile;
+
+ wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
+ enabled, timeout);
+
+ if (enabled)
+ ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+ else
+ ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+ return wil_ps_update(wil, ps_profile);
+}
+
+static int wil_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* Setting the wakeup trigger based on wow is TBD */
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ goto out;
+
+ wil_dbg_pm(wil, "suspending\n");
+
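+	/* stop P2P radio operations and abort any ongoing scans
+	 * on all VIFs before the suspend proceeds
+	 */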
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->vif_mutex);
+ wil_p2p_stop_radio_operations(wil);
+ wil_abort_scan_all_vifs(wil, true);
+ mutex_unlock(&wil->vif_mutex);
+ mutex_unlock(&wil->mutex);
+
+out:
+ return rc;
+}
+
+static int wil_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_pm(wil, "resuming\n");
+
+ return 0;
+}
+
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ int i, rc;
+
+ if (vif->mid != 0)
+ return -EOPNOTSUPP;
+
+ wil_dbg_misc(wil,
+ "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+ request->n_ssids, request->ie_len, request->flags);
+ for (i = 0; i < request->n_ssids; i++) {
+ wil_dbg_misc(wil, "SSID[%d]:", i);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
+ }
+ wil_dbg_misc(wil, "channels:");
+ for (i = 0; i < request->n_channels; i++)
+ wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+ i == request->n_channels - 1 ? "\n" : "");
+ wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+ request->n_match_sets, request->min_rssi_thold,
+ request->delay);
+ for (i = 0; i < request->n_match_sets; i++) {
+ struct cfg80211_match_set *ms = &request->match_sets[i];
+
+ wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+ i, ms->rssi_thold);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ ms->ssid.ssid,
+ ms->ssid.ssid_len, true);
+ }
+ wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+ for (i = 0; i < request->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+ wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+ i, sp->interval, sp->iterations);
+ }
+
+ rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
+ request->ie_len, request->ie);
+ if (rc)
+ return rc;
+ return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+ u64 reqid)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ int rc;
+
+ if (vif->mid != 0)
+ return -EOPNOTSUPP;
+
+ rc = wmi_stop_sched_scan(wil);
+	/* the device returns an error if it thinks PNO is already stopped.
+	 * Ignore the return code so user space and the driver get back in sync.
+ */
+ wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+ return 0;
+}
+
+static int
+wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct cfg80211_bss *bss;
+ struct wmi_ft_reassoc_cmd reassoc;
+ int rc = 0;
+
+ wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid);
+ wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ ftie->ie, ftie->ie_len, true);
+
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+ wil_err(wil, "FW does not support FT roaming\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie);
+ if (rc)
+ return rc;
+
+ if (!test_bit(wil_vif_ft_roam, vif->status))
+ /* vif is not roaming */
+ return 0;
+
+ /* wil_vif_ft_roam is set. wil_cfg80211_update_ft_ies is used as
+ * a trigger for reassoc
+ */
+
+ bss = vif->bss;
+ if (!bss) {
+ wil_err(wil, "FT: bss is NULL\n");
+ return -EINVAL;
+ }
+
+ memset(&reassoc, 0, sizeof(reassoc));
+ ether_addr_copy(reassoc.bssid, bss->bssid);
+
+ rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid,
+ &reassoc, sizeof(reassoc));
+ if (rc)
+ wil_err(wil, "FT: reassoc failed (%d)\n", rc);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_multicast_to_unicast(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (wil->multicast_to_unicast == enabled)
+ return 0;
+
+ wil_info(wil, "set multicast to unicast, enabled=%d\n", enabled);
+ wil->multicast_to_unicast = enabled;
+
+ return 0;
+}
+
+static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil->cqm_rssi_thold = rssi_thold;
+
+ rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
+ if (rc)
+ /* reset stored value upon failure */
+ wil->cqm_rssi_thold = 0;
+
+ return rc;
+}
+
+static const struct cfg80211_ops wil_cfg80211_ops = {
+ .add_virtual_intf = wil_cfg80211_add_iface,
+ .del_virtual_intf = wil_cfg80211_del_iface,
+ .scan = wil_cfg80211_scan,
+ .abort_scan = wil_cfg80211_abort_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .set_wiphy_params = wil_cfg80211_set_wiphy_params,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .dump_station = wil_cfg80211_dump_station,
+ .remain_on_channel = wil_remain_on_channel,
+ .cancel_remain_on_channel = wil_cancel_remain_on_channel,
+ .mgmt_tx = wil_cfg80211_mgmt_tx,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .change_beacon = wil_cfg80211_change_beacon,
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+ .add_station = wil_cfg80211_add_station,
+ .del_station = wil_cfg80211_del_station,
+ .change_station = wil_cfg80211_change_station,
+ .probe_client = wil_cfg80211_probe_client,
+ .change_bss = wil_cfg80211_change_bss,
+ /* P2P device */
+ .start_p2p_device = wil_cfg80211_start_p2p_device,
+ .stop_p2p_device = wil_cfg80211_stop_p2p_device,
+ .set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
+ .suspend = wil_cfg80211_suspend,
+ .resume = wil_cfg80211_resume,
+ .sched_scan_start = wil_cfg80211_sched_scan_start,
+ .sched_scan_stop = wil_cfg80211_sched_scan_stop,
+ .update_ft_ies = wil_cfg80211_update_ft_ies,
+ .set_multicast_to_unicast = wil_cfg80211_set_multicast_to_unicast,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ wiphy->max_scan_ssids = 1;
+ wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (!disable_ap_sme)
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+ dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* may change after reading FW capabilities */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+ wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
+ wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
+ wiphy->vendor_commands = wil_nl80211_vendor_commands;
+
+#ifdef CONFIG_PM
+ wiphy->wowlan = &wil_wowlan_support;
+#endif
+}
+
+int wil_cfg80211_iface_combinations_from_fw(
+ struct wil6210_priv *wil, const struct wil_fw_record_concurrency *conc)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u32 total_limits = 0;
+ u16 n_combos;
+ const struct wil_fw_concurrency_combo *combo;
+ const struct wil_fw_concurrency_limit *limit;
+ struct ieee80211_iface_combination *iface_combinations;
+ struct ieee80211_iface_limit *iface_limit;
+ int i, j;
+
+ if (wiphy->iface_combinations) {
+ wil_dbg_misc(wil, "iface_combinations already set, skipping\n");
+ return 0;
+ }
+
+ combo = conc->combos;
+ n_combos = le16_to_cpu(conc->n_combos);
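+	/* first pass over the variable-length combo records: count the
+	 * total number of limits so a single allocation can hold everything
+	 */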
+ for (i = 0; i < n_combos; i++) {
+ total_limits += combo->n_limits;
+ limit = combo->limits + combo->n_limits;
+ combo = (struct wil_fw_concurrency_combo *)limit;
+ }
+
+ iface_combinations =
+ kzalloc(n_combos * sizeof(struct ieee80211_iface_combination) +
+ total_limits * sizeof(struct ieee80211_iface_limit),
+ GFP_KERNEL);
+ if (!iface_combinations)
+ return -ENOMEM;
+ iface_limit = (struct ieee80211_iface_limit *)(iface_combinations +
+ n_combos);
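+	/* second pass: fill the combinations; the limit entries live in the
+	 * same allocation, right after the combination array
+	 */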
+ combo = conc->combos;
+ for (i = 0; i < n_combos; i++) {
+ iface_combinations[i].max_interfaces = combo->max_interfaces;
+ iface_combinations[i].num_different_channels =
+ combo->n_diff_channels;
+ iface_combinations[i].beacon_int_infra_match =
+ combo->same_bi;
+ iface_combinations[i].n_limits = combo->n_limits;
+ wil_dbg_misc(wil,
+ "iface_combination %d: max_if %d, num_ch %d, bi_match %d\n",
+ i, iface_combinations[i].max_interfaces,
+ iface_combinations[i].num_different_channels,
+ iface_combinations[i].beacon_int_infra_match);
+ limit = combo->limits;
+ for (j = 0; j < combo->n_limits; j++) {
+ iface_limit[j].max = le16_to_cpu(limit[j].max);
+ iface_limit[j].types = le16_to_cpu(limit[j].types);
+ wil_dbg_misc(wil,
+ "limit %d: max %d types 0x%x\n", j,
+ iface_limit[j].max, iface_limit[j].types);
+ }
+ iface_combinations[i].limits = iface_limit;
+ iface_limit += combo->n_limits;
+ limit += combo->n_limits;
+ combo = (struct wil_fw_concurrency_combo *)limit;
+ }
+
+ wil_dbg_misc(wil, "multiple VIFs supported, n_mids %d\n", conc->n_mids);
+ wil->max_vifs = conc->n_mids + 1; /* including main interface */
+ if (wil->max_vifs > WIL_MAX_VIFS) {
+ wil_info(wil, "limited number of VIFs supported(%d, FW %d)\n",
+ WIL_MAX_VIFS, wil->max_vifs);
+ wil->max_vifs = WIL_MAX_VIFS;
+ }
+ wiphy->n_iface_combinations = n_combos;
+ wiphy->iface_combinations = iface_combinations;
+ return 0;
+}
+
+struct wil6210_priv *wil_cfg80211_init(struct device *dev)
+{
+ struct wiphy *wiphy;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* Note: the wireless_dev structure is no longer allocated here.
+ * Instead, it is allocated as part of the net_device structure
+	 * for the main interface and each VIF.
+ */
+ wiphy = wiphy_new(&wil_cfg80211_ops, sizeof(struct wil6210_priv));
+ if (!wiphy)
+ return ERR_PTR(-ENOMEM);
+
+ set_wiphy_dev(wiphy, dev);
+ wil_wiphy_init(wiphy);
+
+ wil = wiphy_to_wil(wiphy);
+ wil->wiphy = wiphy;
+
+ /* default monitor channel */
+ ch = wiphy->bands[NL80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
+
+ return wil;
+}
+
+void wil_cfg80211_deinit(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ dev_dbg(wil_to_dev(wil), "%s()\n", __func__);
+
+ if (!wiphy)
+ return;
+
+ kfree(wiphy->iface_combinations);
+ wiphy->iface_combinations = NULL;
+
+ wiphy_free(wiphy);
+ /* do not access wil6210_priv after returning from here */
+}
+
+void wil_p2p_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *p2p_wdev;
+
+ mutex_lock(&wil->vif_mutex);
+ p2p_wdev = wil->p2p_wdev;
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ mutex_unlock(&wil->vif_mutex);
+ if (p2p_wdev) {
+ cfg80211_unregister_wdev(p2p_wdev);
+ kfree(p2p_wdev);
+ }
+}
+
+static int wil_rf_sector_status_to_rc(u8 status)
+{
+ switch (status) {
+ case WMI_RF_SECTOR_STATUS_SUCCESS:
+ return 0;
+ case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
+ return -EINVAL;
+ case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
+ return -EAGAIN;
+ case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type;
+ u32 rf_modules_vec;
+ struct wmi_get_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_rf_sector_params_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
+ };
+ struct sk_buff *msg;
+ struct nlattr *nl_cfgs, *nl_cfg;
+ u32 i;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ rf_modules_vec = nla_get_u32(
+ tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
+ if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
+ wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
+ return -EINVAL;
+ }
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid,
+ &cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD))
+ goto nla_put_failure;
+
+ nl_cfgs = nla_nest_start_noflag(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+ if (!nl_cfgs)
+ goto nla_put_failure;
+ for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
+ if (!(rf_modules_vec & BIT(i)))
+ continue;
+ nl_cfg = nla_nest_start_noflag(msg, i);
+ if (!nl_cfg)
+ goto nla_put_failure;
+ si = &reply.evt.sectors_info[i];
+ if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+ i) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+ le32_to_cpu(si->etype0)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+ le32_to_cpu(si->etype1)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+ le32_to_cpu(si->etype2)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+ le32_to_cpu(si->psh_hi)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+ le32_to_cpu(si->psh_lo)) ||
+ nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+ le32_to_cpu(si->dtype_swch_off)))
+ goto nla_put_failure;
+ nla_nest_end(msg, nl_cfg);
+ }
+
+ nla_nest_end(msg, nl_cfgs);
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc, tmp;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
+ u16 sector_index, rf_module_index;
+ u8 sector_type;
+ u32 rf_modules_vec = 0;
+ struct wmi_set_rf_sector_params_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_rf_sector_params_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
+ };
+ struct nlattr *nl_cfg;
+ struct wmi_rf_sector_info *si;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
+ tmp) {
+ rc = nla_parse_nested_deprecated(tb2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+ nl_cfg,
+ wil_rf_sector_cfg_policy,
+ NULL);
+ if (rc) {
+ wil_err(wil, "invalid sector cfg\n");
+ return -EINVAL;
+ }
+
+ if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
+ !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
+ wil_err(wil, "missing cfg params\n");
+ return -EINVAL;
+ }
+
+ rf_module_index = nla_get_u8(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
+ if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
+ wil_err(wil, "invalid RF module index %d\n",
+ rf_module_index);
+ return -EINVAL;
+ }
+ rf_modules_vec |= BIT(rf_module_index);
+ si = &cmd.sectors_info[rf_module_index];
+ si->etype0 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
+ si->etype1 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
+ si->etype2 = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
+ si->psh_hi = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
+ si->psh_lo = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
+ si->dtype_swch_off = cpu_to_le32(nla_get_u32(
+ tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
+ }
+
+ cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+ rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid,
+ &cmd, sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u8 sector_type, mac_addr[ETH_ALEN];
+ int cid = 0;
+ struct wmi_get_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_selected_rf_sector_index_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
+ };
+ struct sk_buff *msg;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ cid = wil_find_cid(wil, vif->mid, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n", mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (test_bit(wil_vif_fwconnected, vif->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cid = (u8)cid;
+ cmd.sector_type = sector_type;
+ rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get rf selected sector cfg failed with status %d\n",
+ reply.evt.status);
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+ }
+
+ msg = cfg80211_vendor_cmd_alloc_reply_skb(
+ wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+ if (!msg)
+ return -ENOMEM;
+
+ if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+ le64_to_cpu(reply.evt.tsf),
+ QCA_ATTR_PAD) ||
+ nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
+ le16_to_cpu(reply.evt.sector_idx)))
+ goto nla_put_failure;
+
+ rc = cfg80211_vendor_cmd_reply(msg);
+ return rc;
+nla_put_failure:
+ kfree_skb(msg);
+ return -ENOBUFS;
+}
+
+static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
+ u8 mid, u16 sector_index,
+ u8 sector_type, u8 cid)
+{
+ struct wmi_set_selected_rf_sector_index_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_selected_rf_sector_index_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
+ };
+ int rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sector_idx = cpu_to_le16(sector_index);
+ cmd.sector_type = sector_type;
+ cmd.cid = (u8)cid;
+ rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid,
+ &cmd, sizeof(cmd),
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+ &reply, sizeof(reply),
+ 500);
+ if (rc)
+ return rc;
+ return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wdev_to_wil(wdev);
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ int rc;
+ struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+ u16 sector_index;
+ u8 sector_type, mac_addr[ETH_ALEN], i;
+ int cid = 0;
+
+ if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
+ if (rc) {
+ wil_err(wil, "Invalid rf sector ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+ !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+ wil_err(wil, "Invalid rf sector spec\n");
+ return -EINVAL;
+ }
+
+ sector_index = nla_get_u16(
+ tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+ if (sector_index >= WIL_MAX_RF_SECTORS &&
+ sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "Invalid sector index %d\n", sector_index);
+ return -EINVAL;
+ }
+
+ sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+ if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+ wil_err(wil, "Invalid sector type %d\n", sector_type);
+ return -EINVAL;
+ }
+
+ if (tb[QCA_ATTR_MAC_ADDR]) {
+ ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+ if (!is_broadcast_ether_addr(mac_addr)) {
+ cid = wil_find_cid(wil, vif->mid, mac_addr);
+ if (cid < 0) {
+ wil_err(wil, "invalid MAC address %pM\n",
+ mac_addr);
+ return -ENOENT;
+ }
+ } else {
+ if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+ wil_err(wil, "broadcast MAC valid only with unlocking\n");
+ return -EINVAL;
+ }
+ cid = -1;
+ }
+ } else {
+ if (test_bit(wil_vif_fwconnected, vif->status)) {
+ wil_err(wil, "must specify MAC address when connected\n");
+ return -EINVAL;
+ }
+ /* otherwise, using cid=0 for unassociated station */
+ }
+
+ if (cid >= 0) {
+ rc = wil_rf_sector_wmi_set_selected(wil, vif->mid, sector_index,
+ sector_type, cid);
+ } else {
+ /* unlock all cids */
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX,
+ sector_type, WIL_CID_ALL);
+ if (rc == -EINVAL) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ if (wil->sta[i].mid != vif->mid)
+ continue;
+ rc = wil_rf_sector_wmi_set_selected(
+ wil, vif->mid,
+ WMI_INVALID_RF_SECTOR_INDEX,
+ sector_type, i);
+				/* the FW silently ignores an unused cid and returns
+				 * success, so abort the loop on any other error
+ */
+ if (rc) {
+ wil_err(wil, "unlock cid %d failed with status %d\n",
+ i, rc);
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
new file mode 100644
index 000000000..396c94c53
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "wil6210.h"
+#include "trace.h"
+
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_err(wil->main_ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+}
+
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_err(wil->main_ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+}
+
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_dbg(wil->main_ndev, "%pV", &vaf);
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+}
+
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_info(wil->main_ndev, "%pV", &vaf);
+ trace_wil6210_log_info(&vaf);
+ va_end(args);
+}
+
+void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000..04d1aa0e2
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,2511 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/power_supply.h>
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+#include "pmc.h"
+
+/* Nasty hack. Better to have per-device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
+static u32 dbg_status_msg_index;
+/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
+static u32 dbg_sring_index;
+
+enum dbg_off_type {
+ doff_u32 = 0,
+ doff_x32 = 1,
+ doff_ulong = 2,
+ doff_io32 = 3,
+ doff_u8 = 4
+};
+
+/* offset to "wil" */
+struct dbg_off {
+ const char *name;
+ umode_t mode;
+ ulong off;
+ enum dbg_off_type type;
+};
+
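+/* print one character per descriptor: @_h when a buffer (skb) is attached,
+ * @_s when the descriptor is free; a lowercase 'h' marks a fragment
+ * descriptor within a multi-descriptor frame
+ */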
+static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_ring *ring,
+ char _s, char _h, int idx)
+{
+ u8 num_of_descs;
+ bool has_skb = false;
+
+ if (ring->is_rx) {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ if (wil->rx_buff_mgmt.buff_arr &&
+ wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))
+ has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ seq_printf(s, "%c", (has_skb) ? _h : _s);
+ } else {
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[idx].tx.enhanced;
+
+ num_of_descs = (u8)d->mac.d[2];
+ has_skb = ring->ctx && ring->ctx[idx].skb;
+ if (num_of_descs >= 1)
+ seq_printf(s, "%c", has_skb ? _h : _s);
+ else
+ /* num_of_descs == 0, it's a frag in a list of descs */
+ seq_printf(s, "%c", has_skb ? 'h' : _s);
+ }
+}
+
+static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct wil_ring *ring,
+ char _s, char _h)
+{
+ void __iomem *x;
+ u32 v;
+
+ seq_printf(s, "RING %s = {\n", name);
+ seq_printf(s, " pa = %pad\n", &ring->pa);
+ seq_printf(s, " va = 0x%p\n", ring->va);
+ seq_printf(s, " size = %d\n", ring->size);
+ if (wil->use_enhanced_dma_hw && ring->is_rx)
+ seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
+ else
+ seq_printf(s, " swtail = %d\n", ring->swtail);
+ seq_printf(s, " swhead = %d\n", ring->swhead);
+ if (wil->use_enhanced_dma_hw) {
+ int ring_id = ring->is_rx ?
+ WIL_RX_DESC_RING_ID : ring - wil->ring_tx;
+ /* SUBQ_CONS is a table of 32 entries, one for each Q pair.
+		 * lower 16 bits are for an even ring_id and upper 16 bits
+		 * are for an odd ring_id
+ */
+ x = wmi_addr(wil, RGF_DMA_SCM_SUBQ_CONS + 4 * (ring_id / 2));
+ v = readl_relaxed(x);
+
+ v = (ring_id % 2 ? (v >> 16) : (v & 0xffff));
+ seq_printf(s, " hwhead = %u\n", v);
+ }
+ seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
+ x = wmi_addr(wil, ring->hwtail);
+ if (x) {
+ v = readl(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+
+ if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ uint i;
+
+ for (i = 0; i < ring->size; i++) {
+ if ((i % 128) == 0 && i != 0)
+ seq_puts(s, "\n");
+ if (wil->use_enhanced_dma_hw) {
+ wil_print_desc_edma(s, wil, ring, _s, _h, i);
+ } else {
+ volatile struct vring_tx_desc *d =
+ &ring->va[i].tx.legacy;
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (ring->ctx[i].skb ? _h : 'h'));
+ }
+ }
+ seq_puts(s, "\n");
+ }
+ seq_puts(s, "}\n");
+}
+
+static int ring_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
+
+ for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
+
+ if (ring->va) {
+ int cid = wil->ring2cid_tid[i][0];
+ int tid = wil->ring2cid_tid[i][1];
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+ int used = (ring->size + swhead - swtail)
+ % ring->size;
+ int avail = ring->size - used - 1;
+ char name[10];
+ char sidle[10];
+ /* performance monitoring */
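+			/* show idle time as a percentage of the cycles elapsed
+			 * since the previous dump, then restart the measurement
+			 */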
+ cycles_t now = get_cycles();
+ uint64_t idle = txdata->idle * 100;
+ uint64_t total = now - txdata->begin;
+
+ if (total != 0) {
+ do_div(idle, total);
+ snprintf(sidle, sizeof(sidle), "%3d%%",
+ (int)idle);
+ } else {
+ snprintf(sidle, sizeof(sidle), "N/A");
+ }
+ txdata->begin = now;
+ txdata->idle = 0ULL;
+
+ snprintf(name, sizeof(name), "tx_%2d", i);
+
+ if (cid < wil->max_assoc_sta)
+ seq_printf(s,
+ "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+ wil->sta[cid].addr, cid, tid,
+ txdata->dot1x_open ? "+" : "-",
+ txdata->agg_wsize,
+ txdata->agg_timeout,
+ txdata->agg_amsdu ? "+" : "-",
+ used, avail, sidle);
+ else
+ seq_printf(s,
+ "\nBroadcast 1x%s [%3d|%3d] idle %s\n",
+ txdata->dot1x_open ? "+" : "-",
+ used, avail, sidle);
+
+ wil_print_ring(s, wil, name, ring, '_', 'H');
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ring);
+
+static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ void __iomem *x;
+ int sring_idx = sring - wil->srings;
+ u32 v;
+
+ seq_printf(s, "Status Ring %s [ %d ] = {\n",
+ sring->is_rx ? "RX" : "TX", sring_idx);
+ seq_printf(s, " pa = %pad\n", &sring->pa);
+ seq_printf(s, " va = 0x%pK\n", sring->va);
+ seq_printf(s, " size = %d\n", sring->size);
+ seq_printf(s, " elem_size = %zu\n", sring->elem_size);
+ seq_printf(s, " swhead = %d\n", sring->swhead);
+ if (wil->use_enhanced_dma_hw) {
+ /* COMPQ_PROD is a table of 32 entries, one for each Q pair.
+		 * lower 16 bits are for an even ring_id and upper 16 bits
+		 * are for an odd ring_id
+ */
+ x = wmi_addr(wil, RGF_DMA_SCM_COMPQ_PROD + 4 * (sring_idx / 2));
+ v = readl_relaxed(x);
+
+ v = (sring_idx % 2 ? (v >> 16) : (v & 0xffff));
+ seq_printf(s, " hwhead = %u\n", v);
+ }
+ seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
+ x = wmi_addr(wil, sring->hwtail);
+ if (x) {
+ v = readl_relaxed(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+ seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+ seq_printf(s, " invalid_buff_id_cnt = %d\n",
+ sring->invalid_buff_id_cnt);
+
+ if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
+ uint i;
+
+ for (i = 0; i < sring->size; i++) {
+ u32 *sdword_0 =
+ (u32 *)(sring->va + (sring->elem_size * i));
+
+ if ((i % 128) == 0 && i != 0)
+ seq_puts(s, "\n");
+ if (i == sring->swhead)
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ 'X' : 'x');
+ else
+ seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
+ '1' : '0');
+ }
+ seq_puts(s, "\n");
+ }
+ seq_puts(s, "}\n");
+}
+
+static int srings_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i = 0;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
+ if (wil->srings[i].va)
+ wil_print_sring(s, wil, &wil->srings[i]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(srings);
+
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+ const char *prefix)
+{
+ seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
+}
+
+static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_halp_vote(wil);
+
+ if (wil_mem_access_lock(wil)) {
+ wil_halp_unvote(wil);
+ return;
+ }
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+	 * we just read a memory block from the NIC. This memory may be
+ * garbage. Check validity before using it.
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_puts(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * we already validated @d.addr while
+ * reading header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ wil_seq_hexdump(s, databuf, len, " : ");
+ }
+ } else {
+ seq_puts(s, "\n");
+ }
+ }
+ out:
+ seq_puts(s, "}\n");
+ wil_mem_access_unlock(wil);
+ wil_halp_unvote(wil);
+}
+
+static int mbox_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ wil_pm_runtime_put(wil);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mbox);
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ struct wil_debugfs_iomem_data *d = (struct
+ wil_debugfs_iomem_data *)data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(val, (void __iomem *)d->offset);
+
+ wmb(); /* make sure write propagated to HW */
+
+ wil_pm_runtime_put(wil);
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ struct wil_debugfs_iomem_data *d = (struct
+ wil_debugfs_iomem_data *)data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ *val = readl((void __iomem *)d->offset);
+
+ wil_pm_runtime_put(wil);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static void wil_debugfs_create_iomem_x32(const char *name, umode_t mode,
+ struct dentry *parent, void *value,
+ struct wil6210_priv *wil)
+{
+ struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+ wil->dbg_data.iomem_data_count];
+
+ data->wil = wil;
+ data->offset = value;
+
+ debugfs_create_file_unsafe(name, mode, parent, data, &fops_iomem_x32);
+ wil->dbg_data.iomem_data_count++;
+}
+
+static int wil_debugfs_ulong_set(void *data, u64 val)
+{
+ *(ulong *)data = val;
+ return 0;
+}
+
+static int wil_debugfs_ulong_get(void *data, u64 *val)
+{
+ *val = *(ulong *)data;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
+ wil_debugfs_ulong_set, "0x%llx\n");
+
+/**
+ * wil6210_debugfs_init_offset - create a set of debugfs files
+ * @wil: driver's context, used for printing
+ * @dbg: directory in debugfs where the files will be created
+ * @base: base address used in address calculation
+ * @tbl: table of file descriptions. Must be terminated with an empty element.
+ *
+ * Creates files according to @tbl.
+ */
+static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
+ struct dentry *dbg, void *base,
+ const struct dbg_off * const tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].name; i++) {
+ switch (tbl[i].type) {
+ case doff_u32:
+ debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ case doff_x32:
+ debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ case doff_ulong:
+ debugfs_create_file_unsafe(tbl[i].name, tbl[i].mode,
+ dbg, base + tbl[i].off,
+ &wil_fops_ulong);
+ break;
+ case doff_io32:
+ wil_debugfs_create_iomem_x32(tbl[i].name, tbl[i].mode,
+ dbg, base + tbl[i].off,
+ wil);
+ break;
+ case doff_u8:
+ debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ }
+ }
+}
+
+static const struct dbg_off isr_off[] = {
+ {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
+ {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
+ {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
+ {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
+ {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
+ {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
+ {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
+ {},
+};
+
+static void wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name, struct dentry *parent,
+ u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off,
+ isr_off);
+}
+
+static const struct dbg_off pseudo_isr_off[] = {
+ {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+ {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+ {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+ {},
+};
+
+static void wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
+ pseudo_isr_off);
+}
+
+static const struct dbg_off lgc_itr_cnt_off[] = {
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+ {},
+};
+
+static const struct dbg_off tx_itr_cnt_off[] = {
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
+static const struct dbg_off rx_itr_cnt_off[] = {
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d, *dtx, *drx;
+
+ d = debugfs_create_dir("ITR_CNT", parent);
+
+ dtx = debugfs_create_dir("TX", d);
+ drx = debugfs_create_dir("RX", d);
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
+ lgc_itr_cnt_off);
+
+ wil6210_debugfs_init_offset(wil, dtx, (void * __force)wil->csr,
+ tx_itr_cnt_off);
+
+ wil6210_debugfs_init_offset(wil, drx, (void * __force)wil->csr,
+ rx_itr_cnt_off);
+ return 0;
+}
+
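+/* mem_val: read a single 32-bit word from firmware memory; the address is
+ * taken from the "mem_addr" debugfs parameter (see dbg_statics below).
+ * Usage sketch, address illustrative:
+ *   echo 0x880000 > mem_addr; cat mem_val
+ */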
+static int memread_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ ret = wil_mem_access_lock(wil);
+ if (ret) {
+ wil_pm_runtime_put(wil);
+ return ret;
+ }
+
+ a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ wil_mem_access_unlock(wil);
+ wil_pm_runtime_put(wil);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(memread);
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct wil_blob_wrapper *wil_blob = file->private_data;
+ struct wil6210_priv *wil = wil_blob->wil;
+ loff_t aligned_pos, pos = *ppos;
+ size_t available = wil_blob->blob.size;
+ void *buf;
+ size_t unaligned_bytes, aligned_count, ret;
+ int rc;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
+	/* align pos down to a 4-byte boundary; IO access below is 32-bit wide */
+ unaligned_bytes = pos % 4;
+ aligned_pos = pos - unaligned_bytes;
+ aligned_count = count + unaligned_bytes;
+
+ buf = kmalloc(aligned_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ rc = wil_mem_access_lock(wil);
+ if (rc) {
+ kfree(buf);
+ wil_pm_runtime_put(wil);
+ return rc;
+ }
+
+ wil_memcpy_fromio_32(buf, (const void __iomem *)
+ wil_blob->blob.data + aligned_pos, aligned_count);
+
+ ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
+
+ wil_mem_access_unlock(wil);
+ wil_pm_runtime_put(wil);
+
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ struct wil_blob_wrapper *wil_blob)
+{
+ return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
+}
+
+/*---write channel 1..4 to enable Rx on that channel, 0 to disable Rx---*/
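+/* e.g. (illustrative): echo 2 > rxon   enables Rx on channel 2
+ *                      echo 0 > rxon   disables Rx
+ */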
+static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ long channel;
+ bool on;
+
+ char *kbuf = memdup_user_nul(buf, len);
+
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+ rc = kstrtol(kbuf, 0, &channel);
+ kfree(kbuf);
+ if (rc)
+ return rc;
+
+ if ((channel < 0) || (channel > 4)) {
+ wil_err(wil, "Invalid channel %ld\n", channel);
+ return -EINVAL;
+ }
+ on = !!channel;
+
+ if (on) {
+ rc = wmi_set_channel(wil, (int)channel);
+ if (rc)
+ return rc;
+ }
+
+ rc = wmi_rxon(wil, on);
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static const struct file_operations fops_rxon = {
+ .write = wil_write_file_rxon,
+ .open = simple_open,
+};
+
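+/* rbufcap, write: negative value to disable, 0 to use the system default,
+ * 1..Rx ring size to set the descriptor threshold,
+ * e.g. (illustrative threshold): echo 256 > rbufcap
+ */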
+static ssize_t wil_write_file_rbufcap(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int val;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, count, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+ /* input value: negative to disable, 0 to use system default,
+ * 1..ring size to set descriptor threshold
+ */
+ wil_info(wil, "%s RBUFCAP, descriptors threshold - %d\n",
+ val < 0 ? "Disabling" : "Enabling", val);
+
+ if (!wil->ring_rx.va || val > wil->ring_rx.size) {
+ wil_err(wil, "Invalid descriptors threshold, %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = wmi_rbufcap_cfg(wil, val < 0 ? 0 : 1, val < 0 ? 0 : val);
+ if (rc) {
+ wil_err(wil, "RBUFCAP config failed: %d\n", rc);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_rbufcap = {
+ .write = wil_write_file_rbufcap,
+ .open = simple_open,
+};
+
+/* block ack control, write:
+ * - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
+ * - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
+ * - "del_rx <CID> <TID> <reason>" to trigger DELBA for Rx side
+ */
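+/* e.g. (illustrative values): echo "add 1 16 0" > back
+ * requests ADDBA on Tx ring 1 with aggregation size 16 and no timeout
+ */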
+static ssize_t wil_write_back(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+ char cmd[9];
+ int p1, p2, p3;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 2)
+ return -EINVAL;
+
+ if ((strcmp(cmd, "add") == 0) ||
+ (strcmp(cmd, "del_tx") == 0)) {
+ struct wil_ring_tx_data *txdata;
+
+ if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
+ wil_err(wil, "BACK: invalid ring id %d\n", p1);
+ return -EINVAL;
+ }
+ txdata = &wil->ring_tx_data[p1];
+ if (strcmp(cmd, "add") == 0) {
+ if (rc < 3) {
+ wil_err(wil, "BACK: add require at least 2 params\n");
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = 0;
+ wmi_addba(wil, txdata->mid, p1, p2, p3);
+ } else {
+ if (rc < 3)
+ p2 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ wmi_delba_tx(wil, txdata->mid, p1, p2);
+ }
+ } else if (strcmp(cmd, "del_rx") == 0) {
+ struct wil_sta_info *sta;
+
+ if (rc < 3) {
+ wil_err(wil,
+ "BACK: del_rx require at least 2 params\n");
+ return -EINVAL;
+ }
+ if (p1 < 0 || p1 >= wil->max_assoc_sta) {
+ wil_err(wil, "BACK: invalid CID %d\n", p1);
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ sta = &wil->sta[p1];
+ wmi_delba_rx(wil, sta->mid, p1, p2, p3);
+ } else {
+ wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_back(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char text[] = "block ack control, write:\n"
+ " - \"add <ringid> <agg_size> <timeout>\" to trigger ADDBA\n"
+ "If missing, <timeout> defaults to 0\n"
+ " - \"del_tx <ringid> <reason>\" to trigger DELBA for Tx side\n"
+ " - \"del_rx <CID> <TID> <reason>\" to trigger DELBA for Rx side\n"
+ "If missing, <reason> set to \"STA_LEAVING\" (36)\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_back = {
+ .read = wil_read_back,
+ .write = wil_write_back,
+ .open = simple_open,
+};
+
+/* pmc control, write:
+ * - "alloc <num descriptors> <descriptor_size>" to allocate PMC
+ * - "free" to release memory allocated for PMC
+ */
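+/* e.g. (illustrative sizes): echo "alloc 256 2048" > pmccfg
+ * allocates 256 PMC descriptors of 2048 bytes each; "free" releases them
+ */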
+static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+ char cmd[9];
+ int num_descs, desc_size;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+
+ if (rc < 1) {
+ wil_err(wil, "pmccfg: no params given\n");
+ return -EINVAL;
+ }
+
+ if (0 == strcmp(cmd, "alloc")) {
+ if (rc != 3) {
+ wil_err(wil, "pmccfg: alloc requires 2 params\n");
+ return -EINVAL;
+ }
+ wil_pmc_alloc(wil, num_descs, desc_size);
+	} else if (strcmp(cmd, "free") == 0) {
+ if (rc != 1) {
+ wil_err(wil, "pmccfg: free does not have any params\n");
+ return -EINVAL;
+ }
+ wil_pmc_free(wil, true);
+ } else {
+ wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char text[256];
+ char help[] = "pmc control, write:\n"
+ " - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
+ " - \"free\" to free memory allocated for pmc\n";
+
+ snprintf(text, sizeof(text), "Last command status: %d\n\n%s",
+ wil_pmc_last_cmd_status(wil), help);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ strlen(text) + 1);
+}
+
+static const struct file_operations fops_pmccfg = {
+ .read = wil_read_pmccfg,
+ .write = wil_write_pmccfg,
+ .open = simple_open,
+};
+
+static const struct file_operations fops_pmcdata = {
+ .open = simple_open,
+ .read = wil_pmc_read,
+ .llseek = wil_pmc_llseek,
+};
+
+static int wil_pmcring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_pmcring_read, inode->i_private);
+}
+
+static const struct file_operations fops_pmcring = {
+ .open = wil_pmcring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---tx_mgmt---*/
+/* Write mgmt frame to this file to send it */
+static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
+ struct cfg80211_mgmt_tx_params params;
+ int rc;
+ void *frame;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!len)
+ return -EINVAL;
+
+ frame = memdup_user(buf, len);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ params.buf = frame;
+ params.len = len;
+
+ rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
+
+ kfree(frame);
+ wil_info(wil, "-> %d\n", rc);
+
+ return len;
+}
+
+static const struct file_operations fops_txmgmt = {
+ .write = wil_write_file_txmgmt,
+ .open = simple_open,
+};
+
+/* Write WMI command (w/o mbox header) to this file to send it
+ * The command must start with a struct wmi_cmd_hdr header
+ */
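+/* The write is binary: a struct wmi_cmd_hdr with command_id filled in,
+ * followed by the command payload; the remaining header fields are not
+ * used by this handler.
+ */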
+static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_cmd_hdr *wmi;
+ void *cmd;
+ int cmdlen = len - sizeof(struct wmi_cmd_hdr);
+ u16 cmdid;
+ int rc1;
+
+ if (cmdlen < 0 || *ppos != 0)
+ return -EINVAL;
+
+ wmi = memdup_user(buf, len);
+ if (IS_ERR(wmi))
+ return PTR_ERR(wmi);
+
+ cmd = (cmdlen > 0) ? &wmi[1] : NULL;
+ cmdid = le16_to_cpu(wmi->command_id);
+
+ rc1 = wmi_send(wil, cmdid, vif->mid, cmd, cmdlen);
+ kfree(wmi);
+
+ wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
+
+ return len;
+}
+
+static const struct file_operations fops_wmi = {
+ .write = wil_write_file_wmi,
+ .open = simple_open,
+};
+
+static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
+{
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ seq_printf(s, " len = %d\n", len);
+ wil_seq_hexdump(s, p, len, " : ");
+
+ if (nr_frags) {
+ seq_printf(s, " nr_frags = %d\n", nr_frags);
+ for (i = 0; i < nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ p = skb_frag_address_safe(frag);
+ seq_printf(s, " [%2d] : len = %d\n", i, len);
+ wil_seq_hexdump(s, p, len, " : ");
+ }
+ }
+}
+
+/*---------Tx/Rx descriptor------------*/
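+/* desc: dump one Tx/Rx descriptor; the ring and descriptor are selected
+ * via the "ring_index" and "desc_index" debugfs parameters, e.g.
+ * (illustrative indices): echo 1 > ring_index; echo 5 > desc_index; cat desc
+ */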
+static int txdesc_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil_ring *ring;
+ bool tx;
+ int ring_idx = dbg_ring_index;
+ int txdesc_idx = dbg_txdesc_index;
+ volatile struct vring_tx_desc *d;
+ volatile u32 *u;
+ struct sk_buff *skb;
+
+ if (wil->use_enhanced_dma_hw) {
+ /* RX ring index == 0 */
+ if (ring_idx >= WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
+ } else {
+ /* RX ring index == WIL6210_MAX_TX_RINGS */
+ if (ring_idx > WIL6210_MAX_TX_RINGS) {
+ seq_printf(s, "invalid ring index %d\n", ring_idx);
+ return 0;
+ }
+ tx = (ring_idx < WIL6210_MAX_TX_RINGS);
+ }
+
+ ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
+
+ if (!ring->va) {
+ if (tx)
+ seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
+ else
+ seq_puts(s, "No Rx RING\n");
+ return 0;
+ }
+
+ if (txdesc_idx >= ring->size) {
+ if (tx)
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ ring_idx, txdesc_idx, ring->size);
+ else
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ txdesc_idx, ring->size);
+ return 0;
+ }
+
+	/* use struct vring_tx_desc for Rx as well;
+	 * the only field used, .dma.length, is the same in both
+ */
+ d = &ring->va[txdesc_idx].tx.legacy;
+ u = (volatile u32 *)d;
+ skb = NULL;
+
+ if (wil->use_enhanced_dma_hw) {
+ if (tx) {
+ skb = ring->ctx ? ring->ctx[txdesc_idx].skb : NULL;
+ } else if (wil->rx_buff_mgmt.buff_arr) {
+ struct wil_rx_enhanced_desc *rx_d =
+ (struct wil_rx_enhanced_desc *)
+ &ring->va[txdesc_idx].rx.enhanced;
+ u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
+
+ if (!wil_val_in_range(buff_id, 0,
+ wil->rx_buff_mgmt.size))
+ seq_printf(s, "invalid buff_id %d\n", buff_id);
+ else
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ }
+ } else {
+ skb = ring->ctx[txdesc_idx].skb;
+ }
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
+ txdesc_idx);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = 0x%p\n", skb);
+
+ if (skb) {
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
+ }
+ seq_puts(s, "}\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(txdesc);
+
+/*---------Tx/Rx status message------------*/
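+/* status_msg: dump one Tx/Rx status message; the status ring and message
+ * are selected via the "sring_index" and "status_index" debugfs parameters,
+ * e.g. (illustrative indices): echo 0 > sring_index; echo 10 > status_index
+ */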
+static int status_msg_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int sring_idx = dbg_sring_index;
+ struct wil_status_ring *sring;
+ bool tx;
+ u32 status_msg_idx = dbg_status_msg_index;
+ u32 *u;
+
+ if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
+ seq_printf(s, "invalid status ring index %d\n", sring_idx);
+ return 0;
+ }
+
+ sring = &wil->srings[sring_idx];
+ tx = !sring->is_rx;
+
+ if (!sring->va) {
+ seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
+ return 0;
+ }
+
+ if (status_msg_idx >= sring->size) {
+ seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
+ tx ? 'T' : 'R', status_msg_idx, sring->size);
+ return 0;
+ }
+
+ u = sring->va + (sring->elem_size * status_msg_idx);
+
+ seq_printf(s, "%cx[%d][%3d] = {\n",
+ tx ? 'T' : 'R', sring_idx, status_msg_idx);
+
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ if (!tx && !wil->use_compressed_rx_status)
+ seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+
+ seq_puts(s, "}\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(status_msg);
+
+static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
+{
+ struct wil_rx_buff *it;
+ int i = 0;
+
+ list_for_each_entry(it, lh, list) {
+ if ((i % 16) == 0 && i != 0)
+ seq_puts(s, "\n ");
+ seq_printf(s, "[%4d] ", it->id);
+ i++;
+ }
+ seq_printf(s, "\nNumber of buffers: %u\n", i);
+
+ return i;
+}
+
+static int rx_buff_mgmt_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
+ int num_active;
+ int num_free;
+
+ if (!rbm->buff_arr)
+ return -EINVAL;
+
+ seq_printf(s, " size = %zu\n", rbm->size);
+ seq_printf(s, " free_list_empty_cnt = %lu\n",
+ rbm->free_list_empty_cnt);
+
+ /* Print active list */
+ seq_puts(s, " Active list:\n");
+ num_active = wil_print_rx_buff(s, &rbm->active);
+ seq_puts(s, "\n Free list:\n");
+ num_free = wil_print_rx_buff(s, &rbm->free);
+
+ seq_printf(s, " Total number of buffers: %u\n",
+ num_active + num_free);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rx_buff_mgmt);
+
+/*---------beamforming------------*/
+static char *wil_bfstatus_str(u32 status)
+{
+ switch (status) {
+ case 0:
+ return "Failed";
+ case 1:
+ return "OK";
+ case 2:
+ return "Retrying";
+ default:
+ return "??";
+ }
+}
+
+static bool is_all_zeros(void * const x_, size_t sz)
+{
+	/* return true if the buffer contains only zeros */
+ u32 *x = x_;
+ int n;
+
+ for (n = 0; n < sz / sizeof(*x); n++)
+ if (x[n])
+ return false;
+
+ return true;
+}
+
+static int bf_show(struct seq_file *s, void *data)
+{
+ int rc;
+ int i;
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_notify_req_cmd cmd = {
+ .interval_usec = 0,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+
+ memset(&reply, 0, sizeof(reply));
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ u32 status;
+ u8 bf_mcs;
+
+ cmd.cid = i;
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ /* if reply is all-0, ignore this CID */
+ if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt)))
+ continue;
+
+ status = le32_to_cpu(reply.evt.status);
+ bf_mcs = le16_to_cpu(reply.evt.bf_mcs);
+ seq_printf(s, "CID %d {\n"
+ " TSF = 0x%016llx\n"
+ " TxMCS = %s TxTpt = %4d\n"
+ " SQI = %4d\n"
+ " RSSI = %4d\n"
+ " Status = 0x%08x %s\n"
+ " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n"
+ " Goodput(rx:tx) %4d:%4d\n"
+ "}\n",
+ i,
+ le64_to_cpu(reply.evt.tsf),
+ WIL_EXTENDED_MCS_CHECK(bf_mcs),
+ le32_to_cpu(reply.evt.tx_tpt),
+ reply.evt.sqi,
+ reply.evt.rssi,
+ status, wil_bfstatus_str(status),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le32_to_cpu(reply.evt.tx_goodput));
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bf);
+
+/*---------temp------------*/
+static void print_temp(struct seq_file *s, const char *prefix, s32 t)
+{
+ switch (t) {
+ case 0:
+ case WMI_INVALID_TEMPERATURE:
+ seq_printf(s, "%s N/A\n", prefix);
+ break;
+ default:
+ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
+ abs(t / 1000), abs(t % 1000));
+ break;
+ }
+}
+
+static int temp_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int rc, i;
+
+ if (test_bit(WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF,
+ wil->fw_capabilities)) {
+ struct wmi_temp_sense_all_done_event sense_all_evt;
+
+ wil_dbg_misc(wil,
+ "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is supported");
+ rc = wmi_get_all_temperatures(wil, &sense_all_evt);
+ if (rc) {
+ seq_puts(s, "Failed\n");
+ return 0;
+ }
+ print_temp(s, "T_mac =",
+ le32_to_cpu(sense_all_evt.baseband_t1000));
+ seq_printf(s, "Connected RFs [0x%08x]\n",
+ sense_all_evt.rf_bitmap);
+ for (i = 0; i < WMI_MAX_XIF_PORTS_NUM; i++) {
+ seq_printf(s, "RF[%d] = ", i);
+ print_temp(s, "",
+ le32_to_cpu(sense_all_evt.rf_t1000[i]));
+ }
+ } else {
+ s32 t_m, t_r;
+
+ wil_dbg_misc(wil,
+ "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is not supported");
+ rc = wmi_get_temperature(wil, &t_m, &t_r);
+ if (rc) {
+ seq_puts(s, "Failed\n");
+ return 0;
+ }
+ print_temp(s, "T_mac =", t_m);
+ print_temp(s, "T_radio =", t_r);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(temp);
+
+/*---------link------------*/
+static int link_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct station_info *sinfo;
+ int i, rc = 0;
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ struct wil6210_vif *vif;
+ u8 mid;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ seq_printf(s, "[%d][MID %d] %pM %s\n",
+ i, mid, p->addr, status);
+
+ if (p->status != wil_sta_connected)
+ continue;
+
+ vif = (mid < GET_MAX_VIFS(wil)) ? wil->vifs[mid] : NULL;
+ if (vif) {
+ rc = wil_cid_fill_sinfo(vif, i, sinfo);
+ if (rc)
+ goto out;
+
+ seq_printf(s, " Tx_mcs = %s\n",
+ WIL_EXTENDED_MCS_CHECK(sinfo->txrate.mcs));
+ seq_printf(s, " Rx_mcs = %s\n",
+ WIL_EXTENDED_MCS_CHECK(sinfo->rxrate.mcs));
+ seq_printf(s, " SQ = %d\n", sinfo->signal);
+ } else {
+ seq_puts(s, " INVALID MID\n");
+ }
+ }
+
+out:
+ kfree(sinfo);
+ return rc;
+}
+DEFINE_SHOW_ATTRIBUTE(link);
+
+/*---------info------------*/
+static int info_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct net_device *ndev = wil->main_ndev;
+ int is_ac = power_supply_is_system_supplied();
+ int rx = atomic_xchg(&wil->isr_count_rx, 0);
+ int tx = atomic_xchg(&wil->isr_count_tx, 0);
+ static ulong rxf_old, txf_old;
+ ulong rxf = ndev->stats.rx_packets;
+ ulong txf = ndev->stats.tx_packets;
+ unsigned int i;
+
+ /* >0 : AC; 0 : battery; <0 : error */
+ seq_printf(s, "AC powered : %d\n", is_ac);
+ seq_printf(s, "Rx irqs:packets : %8d : %8ld\n", rx, rxf - rxf_old);
+ seq_printf(s, "Tx irqs:packets : %8d : %8ld\n", tx, txf - txf_old);
+ rxf_old = rxf;
+ txf_old = txf;
+
+#define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \
+ " " __stringify(x) : ""
+
+ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
+ unsigned long state = txq->state;
+
+ seq_printf(s, "Tx queue[%i] state : 0x%lx%s%s%s\n", i, state,
+ CHECK_QSTATE(DRV_XOFF),
+ CHECK_QSTATE(STACK_XOFF),
+ CHECK_QSTATE(FROZEN)
+ );
+ }
+#undef CHECK_QSTATE
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+/*---------recovery------------*/
+/* mode = [manual|auto]
+ * state = [idle|pending|running]
+ */
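+/* e.g. reading shows "mode = auto" and "state = pending";
+ * echo run > recovery proceeds with recovery while the state is pending
+ */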
+static ssize_t wil_read_file_recovery(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char buf[80];
+ int n;
+ static const char * const sstate[] = {"idle", "pending", "running"};
+
+ n = snprintf(buf, sizeof(buf), "mode = %s\nstate = %s\n",
+ no_fw_recovery ? "manual" : "auto",
+ sstate[wil->recovery_state]);
+
+ n = min_t(int, n, sizeof(buf));
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, n);
+}
+
+static ssize_t wil_write_file_recovery(struct file *file,
+ const char __user *buf_,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ static const char run_command[] = "run";
+ char buf[sizeof(run_command) + 1]; /* to detect "runx" */
+ ssize_t rc;
+
+ if (wil->recovery_state != fw_recovery_pending) {
+ wil_err(wil, "No recovery pending\n");
+ return -EINVAL;
+ }
+
+ if (*ppos != 0) {
+ wil_err(wil, "Offset [%d]\n", (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(buf)) {
+ wil_err(wil, "Input too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, buf_, count);
+ if (rc < 0)
+ return rc;
+
+ buf[rc] = '\0';
+	if (strcmp(buf, run_command) == 0)
+ wil_set_recovery_state(wil, fw_recovery_running);
+ else
+ wil_err(wil, "Bad recovery command \"%s\"\n", buf);
+
+ return rc;
+}
+
+static const struct file_operations fops_recovery = {
+ .read = wil_read_file_recovery,
+ .write = wil_write_file_recovery,
+ .open = simple_open,
+};
+
+/*---------Station matrix------------*/
+static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
+{
+ int i;
+ u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+ unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
+ unsigned long long drop_dup_mcast = r->drop_dup_mcast;
+
+ seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num);
+ for (i = 0; i < r->buf_size; i++) {
+ if (i == index)
+ seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
+ else
+ seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
+ }
+ seq_printf(s,
+ "] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n",
+ r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup,
+ drop_old, drop_dup_mcast, r->ssn_last_drop);
+}
+
+static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
+ struct wil_tid_crypto_rx *c)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ if (cc->key_set)
+ goto has_keys;
+ }
+ return;
+
+has_keys:
+ if (tid < WIL_STA_TID_NUM)
+ seq_printf(s, " [%2d] PN", tid);
+ else
+ seq_puts(s, " [GR] PN");
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
+ cc->pn);
+ }
+ seq_puts(s, "\n");
+}
+
+static int sta_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, tid, mcs;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ u8 aid = 0;
+ u8 mid;
+ bool sta_connected = false;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ aid = p->aid;
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ if (mid < GET_MAX_VIFS(wil)) {
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ p->status == wil_sta_connected)
+ sta_connected = true;
+ }
+ /* print roam counter only for connected stations */
+ if (sta_connected)
+ seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n",
+ i, p->addr, p->stats.ft_roams, mid, aid);
+ else
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i,
+ p->addr, status, mid, aid);
+
+ if (p->status == wil_sta_connected) {
+ spin_lock_bh(&p->tid_rx_lock);
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+ struct wil_tid_crypto_rx *c =
+ &p->tid_crypto_rx[tid];
+
+ if (r) {
+ seq_printf(s, " [%2d] ", tid);
+ wil_print_rxtid(s, r);
+ }
+
+ wil_print_rxtid_crypto(s, tid, c);
+ }
+ wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
+ &p->group_crypto_rx);
+ spin_unlock_bh(&p->tid_rx_lock);
+ seq_printf(s,
+ "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
+ p->stats.rx_non_data_frame,
+ p->stats.rx_short_frame,
+ p->stats.rx_large_frame,
+ p->stats.rx_replay);
+ seq_printf(s,
+ "mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n",
+ p->stats.rx_mic_error,
+ p->stats.rx_key_error,
+ p->stats.rx_amsdu_error,
+ p->stats.rx_csum_err);
+
+ seq_puts(s, "Rx/MCS:");
+ for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
+ mcs++)
+ seq_printf(s, " %lld",
+ p->stats.rx_per_mcs[mcs]);
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sta);
+
+static int mids_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif;
+ struct net_device *ndev;
+ int i;
+
+ mutex_lock(&wil->vif_mutex);
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+
+ if (vif) {
+ ndev = vif_to_ndev(vif);
+ seq_printf(s, "[%d] %pM %s\n", i, ndev->dev_addr,
+ ndev->name);
+ } else {
+ seq_printf(s, "[%d] unused\n", i);
+ }
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mids);
+
+static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, bin;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ u8 aid = 0;
+ u8 mid;
+
+ if (!p->tx_latency_bins)
+ continue;
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ aid = p->aid;
+ break;
+ }
+ mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
+ mid, aid);
+
+ if (p->status == wil_sta_connected) {
+ u64 num_packets = 0;
+ u64 tx_latency_avg = p->stats.tx_latency_total_us;
+
+ seq_puts(s, "Tx/Latency bin:");
+ for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) {
+ seq_printf(s, " %lld",
+ p->tx_latency_bins[bin]);
+ num_packets += p->tx_latency_bins[bin];
+ }
+ seq_puts(s, "\n");
+ if (!num_packets)
+ continue;
+ do_div(tx_latency_avg, num_packets);
+ seq_printf(s, "Tx/Latency min/avg/max (us): %d/%lld/%d",
+ p->stats.tx_latency_min_us,
+ tx_latency_avg,
+ p->stats.tx_latency_max_us);
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int wil_tx_latency_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_tx_latency_debugfs_show,
+ inode->i_private);
+}
+
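+/* tx_latency, write: 0 to disable measurements, 1 for the default 500 us
+ * resolution, or an explicit resolution of 50..1000 us,
+ * e.g. (illustrative): echo 100 > tx_latency
+ */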
+static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int val, rc, i;
+ bool enable;
+
+ rc = kstrtoint_from_user(buf, len, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+ if (val == 1)
+ /* default resolution */
+ val = 500;
+ if (val && (val < 50 || val > 1000)) {
+ wil_err(wil, "Invalid resolution %d\n", val);
+ return -EINVAL;
+ }
+
+ enable = !!val;
+ if (wil->tx_latency == enable)
+ return len;
+
+ wil_info(wil, "%s TX latency measurements (resolution %dusec)\n",
+ enable ? "Enabling" : "Disabling", val);
+
+ if (enable) {
+ size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
+
+ wil->tx_latency_res = val;
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ struct wil_sta_info *sta = &wil->sta[i];
+
+ kfree(sta->tx_latency_bins);
+ sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL);
+ if (!sta->tx_latency_bins)
+ return -ENOMEM;
+ sta->stats.tx_latency_min_us = U32_MAX;
+ sta->stats.tx_latency_max_us = 0;
+ sta->stats.tx_latency_total_us = 0;
+ }
+ }
+ wil->tx_latency = enable;
+
+ return len;
+}
+
+static const struct file_operations fops_tx_latency = {
+ .open = wil_tx_latency_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_tx_latency_write,
+ .llseek = seq_lseek,
+};
+
+static void wil_link_stats_print_basic(struct wil6210_vif *vif,
+ struct seq_file *s,
+ struct wmi_link_stats_basic *basic)
+{
+ char per[5] = "?";
+
+ if (basic->per_average != 0xff)
+ snprintf(per, sizeof(per), "%d%%", basic->per_average);
+
+ seq_printf(s, "CID %d {\n"
+ "\tTxMCS %s TxTpt %d\n"
+ "\tGoodput(rx:tx) %d:%d\n"
+ "\tRxBcastFrames %d\n"
+ "\tRSSI %d SQI %d SNR %d PER %s\n"
+ "\tRx RFC %d Ant num %d\n"
+ "\tSectors(rx:tx) my %d:%d peer %d:%d\n"
+ "}\n",
+ basic->cid,
+ WIL_EXTENDED_MCS_CHECK(basic->bf_mcs),
+ le32_to_cpu(basic->tx_tpt),
+ le32_to_cpu(basic->rx_goodput),
+ le32_to_cpu(basic->tx_goodput),
+ le32_to_cpu(basic->rx_bcast_frames),
+ basic->rssi, basic->sqi, basic->snr, per,
+ basic->selected_rfc, basic->rx_effective_ant_num,
+ basic->my_rx_sector, basic->my_tx_sector,
+ basic->other_rx_sector, basic->other_tx_sector);
+}
+
+static void wil_link_stats_print_global(struct wil6210_priv *wil,
+ struct seq_file *s,
+ struct wmi_link_stats_global *global)
+{
+ seq_printf(s, "Frames(rx:tx) %d:%d\n"
+ "BA Frames(rx:tx) %d:%d\n"
+ "Beacons %d\n"
+ "Rx Errors (MIC:CRC) %d:%d\n"
+ "Tx Errors (no ack) %d\n",
+ le32_to_cpu(global->rx_frames),
+ le32_to_cpu(global->tx_frames),
+ le32_to_cpu(global->rx_ba_frames),
+ le32_to_cpu(global->tx_ba_frames),
+ le32_to_cpu(global->tx_beacons),
+ le32_to_cpu(global->rx_mic_errors),
+ le32_to_cpu(global->rx_crc_errors),
+ le32_to_cpu(global->tx_fail_no_ack));
+}
+
+static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
+ struct seq_file *s)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_basic *stats;
+ int i;
+
+ if (!vif->fw_stats_ready) {
+ seq_puts(s, "no statistics\n");
+ return;
+ }
+
+ seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (wil->sta[i].mid != vif->mid)
+ continue;
+
+ stats = &wil->sta[i].fw_stats_basic;
+ wil_link_stats_print_basic(vif, s, stats);
+ }
+}
+
+static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_vif *vif;
+ int i, rc;
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+	/* iterate over all MIDs and show per-cid statistics; global
+	 * statistics are exposed via the "link_stats_global" entry
+	 */
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+
+ seq_printf(s, "MID %d ", i);
+ if (!vif) {
+ seq_puts(s, "unused\n");
+ continue;
+ }
+
+ wil_link_stats_debugfs_show_vif(vif, s);
+ }
+
+ mutex_unlock(&wil->vif_mutex);
+
+ return 0;
+}
+
+static int wil_link_stats_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_debugfs_show, inode->i_private);
+}
+
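+/* link_stats, write: "<cid> <interval_ms>" (-1 for all cids),
+ * e.g. (illustrative interval): echo "-1 1000" > link_stats requests basic
+ * per-CID statistics with a 1000 ms snapshot interval
+ */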
+static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int cid, interval, rc, i;
+ struct wil6210_vif *vif;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ /* specify cid (use -1 for all cids) and snapshot interval in ms */
+ rc = sscanf(kbuf, "%d %d", &cid, &interval);
+ kfree(kbuf);
+ if (rc < 0)
+ return rc;
+ if (rc < 2 || interval < 0)
+ return -EINVAL;
+
+ wil_info(wil, "request link statistics, cid %d interval %d\n",
+ cid, interval);
+
+ rc = mutex_lock_interruptible(&wil->vif_mutex);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (!vif)
+ continue;
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
+ (cid == -1 ? 0xff : cid), interval);
+ if (rc)
+ wil_err(wil, "link statistics failed for mid %d\n", i);
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ return len;
+}
+
+static const struct file_operations fops_link_stats = {
+ .open = wil_link_stats_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_write,
+ .llseek = seq_lseek,
+};
+
+static int
+wil_link_stats_global_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (!wil->fw_stats_global.ready)
+ return 0;
+
+ seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf);
+ wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats);
+
+ return 0;
+}
+
+static int
+wil_link_stats_global_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_stats_global_debugfs_show,
+ inode->i_private);
+}
+
+static ssize_t
+wil_link_stats_global_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int interval, rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+
+ /* specify snapshot interval in ms */
+ rc = kstrtoint_from_user(buf, len, 0, &interval);
+ if (rc || interval < 0) {
+ wil_err(wil, "Invalid argument\n");
+ return -EINVAL;
+ }
+
+ wil_info(wil, "request global link stats, interval %d\n", interval);
+
+ rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval);
+ if (rc)
+ wil_err(wil, "global link stats failed %d\n", rc);
+
+ return rc ? rc : len;
+}
+
+static const struct file_operations fops_link_stats_global = {
+ .open = wil_link_stats_global_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_link_stats_global_write,
+ .llseek = seq_lseek,
+};
+
+static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[80];
+ int n;
+
+ n = snprintf(buf, sizeof(buf),
+ "led_id is set to %d, echo 1 to enable, 0 to disable\n",
+ led_id);
+
+ n = min_t(int, n, sizeof(buf));
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, n);
+}
+
+static ssize_t wil_write_file_led_cfg(struct file *file,
+ const char __user *buf_,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int val;
+ int rc;
+
+ rc = kstrtoint_from_user(buf_, count, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id);
+ rc = wmi_led_cfg(wil, val);
+ if (rc) {
+ wil_info(wil, "%s led %d failed\n",
+ val ? "Enabling" : "Disabling", led_id);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_led_cfg = {
+ .read = wil_read_file_led_cfg,
+ .write = wil_write_file_led_cfg,
+ .open = simple_open,
+};
+
+/* led_blink_time, write:
+ * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>
+ */
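+/* e.g. (illustrative values): echo "500 500 200 200 50 50" > led_blink_time
+ * sets on/off times in ms for the slow, medium and fast blink rates
+ */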
+static ssize_t wil_write_led_blink_time(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%d %d %d %d %d %d",
+ &led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ &led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ &led_blink_time[WIL_LED_TIME_MED].on_ms,
+ &led_blink_time[WIL_LED_TIME_MED].off_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].off_ms);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 6)
+ return -EINVAL;
+
+ return len;
+}
+
+static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char text[400];
+
+ snprintf(text, sizeof(text),
+ "To set led blink on/off time variables write:\n"
+ "<blink_on_slow> <blink_off_slow> <blink_on_med> "
+ "<blink_off_med> <blink_on_fast> <blink_off_fast>\n"
+ "The current values are:\n"
+ "%d %d %d %d %d %d\n",
+ led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ led_blink_time[WIL_LED_TIME_MED].on_ms,
+ led_blink_time[WIL_LED_TIME_MED].off_ms,
+ led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ led_blink_time[WIL_LED_TIME_FAST].off_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_led_blink_time = {
+ .read = wil_read_led_blink_time,
+ .write = wil_write_led_blink_time,
+ .open = simple_open,
+};
+
+/*---------FW capabilities------------*/
+static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
+ wil->fw_capabilities);
+
+ return 0;
+}
+
+static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_capabilities_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_capabilities = {
+ .open = wil_fw_capabilities_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------FW version------------*/
+static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (wil->fw_version[0])
+ seq_printf(s, "%s\n", wil->fw_version);
+ else
+ seq_puts(s, "N/A\n");
+
+ return 0;
+}
+
+static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_version_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_version = {
+ .open = wil_fw_version_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------suspend_stats---------*/
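+/* suspend_stats: any write clears the counters, reading dumps them */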
+static ssize_t wil_write_suspend_stats(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+
+ memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+
+ return len;
+}
+
+static ssize_t wil_read_suspend_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char *text;
+ int n, ret, text_size = 500;
+
+ text = kmalloc(text_size, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+ n = snprintf(text, text_size,
+ "Radio on suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by device:%ld\n"
+ "Radio off suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "General statistics:\n"
+ "rejected by host:%ld\n",
+ wil->suspend_stats.r_on.successful_suspends,
+ wil->suspend_stats.r_on.failed_suspends,
+ wil->suspend_stats.r_on.successful_resumes,
+ wil->suspend_stats.r_on.failed_resumes,
+ wil->suspend_stats.rejected_by_device,
+ wil->suspend_stats.r_off.successful_suspends,
+ wil->suspend_stats.r_off.failed_suspends,
+ wil->suspend_stats.r_off.successful_resumes,
+ wil->suspend_stats.r_off.failed_resumes,
+ wil->suspend_stats.rejected_by_host);
+
+ n = min_t(int, n, text_size);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
+
+ kfree(text);
+
+ return ret;
+}
+
+static const struct file_operations fops_suspend_stats = {
+ .read = wil_read_suspend_stats,
+ .write = wil_write_suspend_stats,
+ .open = simple_open,
+};
+
+/*---------compressed_rx_status---------*/
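+/* compressed_rx_status, write: 0 or 1 to select full/compressed Rx status
+ * reporting for enhanced DMA; must be changed before any interface is up,
+ * e.g. echo 1 > compressed_rx_status
+ */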
+static ssize_t wil_compressed_rx_status_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct wil6210_priv *wil = s->private;
+ int compressed_rx_status;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ if (wil_has_active_ifaces(wil, true, false)) {
+ wil_err(wil, "cannot change edma config after iface is up\n");
+ return -EPERM;
+ }
+
+ wil_info(wil, "%sable compressed_rx_status\n",
+ compressed_rx_status ? "En" : "Dis");
+
+ wil->use_compressed_rx_status = compressed_rx_status;
+
+ return len;
+}
+
+static int
+wil_compressed_rx_status_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "%d\n", wil->use_compressed_rx_status);
+
+ return 0;
+}
+
+static int
+wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_compressed_rx_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_compressed_rx_status = {
+ .open = wil_compressed_rx_status_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .write = wil_compressed_rx_status_write,
+ .llseek = seq_lseek,
+};
+
+/*----------------*/
+static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+ char name[32];
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ struct wil_blob_wrapper *wil_blob = &wil->blobs[i];
+ struct debugfs_blob_wrapper *blob = &wil_blob->blob;
+ const struct fw_map *map = &fw_mapping[i];
+
+ if (!map->name)
+ continue;
+
+ wil_blob->wil = wil;
+ blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
+ blob->size = map->to - map->from;
+ snprintf(name, sizeof(name), "blob_%s", map->name);
+ wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
+ }
+}
+
+/* misc files */
+static const struct {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+} dbg_files[] = {
+ {"mbox", 0444, &mbox_fops},
+ {"rings", 0444, &ring_fops},
+ {"stations", 0444, &sta_fops},
+ {"mids", 0444, &mids_fops},
+ {"desc", 0444, &txdesc_fops},
+ {"bf", 0444, &bf_fops},
+ {"mem_val", 0644, &memread_fops},
+ {"rxon", 0244, &fops_rxon},
+ {"tx_mgmt", 0244, &fops_txmgmt},
+ {"wmi_send", 0244, &fops_wmi},
+ {"back", 0644, &fops_back},
+ {"pmccfg", 0644, &fops_pmccfg},
+ {"pmcdata", 0444, &fops_pmcdata},
+ {"pmcring", 0444, &fops_pmcring},
+ {"temp", 0444, &temp_fops},
+ {"link", 0444, &link_fops},
+ {"info", 0444, &info_fops},
+ {"recovery", 0644, &fops_recovery},
+ {"led_cfg", 0644, &fops_led_cfg},
+ {"led_blink_time", 0644, &fops_led_blink_time},
+ {"fw_capabilities", 0444, &fops_fw_capabilities},
+ {"fw_version", 0444, &fops_fw_version},
+ {"suspend_stats", 0644, &fops_suspend_stats},
+ {"compressed_rx_status", 0644, &fops_compressed_rx_status},
+ {"srings", 0444, &srings_fops},
+ {"status_msg", 0444, &status_msg_fops},
+ {"rx_buff_mgmt", 0444, &rx_buff_mgmt_fops},
+ {"tx_latency", 0644, &fops_tx_latency},
+ {"link_stats", 0644, &fops_link_stats},
+ {"link_stats_global", 0644, &fops_link_stats_global},
+ {"rbufcap", 0244, &fops_rbufcap},
+};
+
+static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbg_files); i++)
+ debugfs_create_file(dbg_files[i].name, dbg_files[i].mode, dbg,
+ wil, dbg_files[i].fops);
+}
+
+/* interrupt control blocks */
+static const struct {
+ const char *name;
+ u32 icr_off;
+} dbg_icr[] = {
+ {"USER_ICR", HOSTADDR(RGF_USER_USER_ICR)},
+ {"DMA_EP_TX_ICR", HOSTADDR(RGF_DMA_EP_TX_ICR)},
+ {"DMA_EP_RX_ICR", HOSTADDR(RGF_DMA_EP_RX_ICR)},
+ {"DMA_EP_MISC_ICR", HOSTADDR(RGF_DMA_EP_MISC_ICR)},
+};
+
+static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbg_icr); i++)
+ wil6210_debugfs_create_ISR(wil, dbg_icr[i].name, dbg,
+ dbg_icr[i].icr_off);
+}
+
+#define WIL_FIELD(name, mode, type) { __stringify(name), mode, \
+ offsetof(struct wil6210_priv, name), type}
+
+/* fields in struct wil6210_priv */
+static const struct dbg_off dbg_wil_off[] = {
+ WIL_FIELD(status[0], 0644, doff_ulong),
+ WIL_FIELD(hw_version, 0444, doff_x32),
+ WIL_FIELD(recovery_count, 0444, doff_u32),
+ WIL_FIELD(discovery_mode, 0644, doff_u8),
+ WIL_FIELD(chip_revision, 0444, doff_u8),
+ WIL_FIELD(abft_len, 0644, doff_u8),
+ WIL_FIELD(wakeup_trigger, 0644, doff_u8),
+ WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
+ WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
+ WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
+ WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
+ WIL_FIELD(amsdu_en, 0644, doff_u8),
+ {},
+};
+
+static const struct dbg_off dbg_wil_regs[] = {
+ {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+ doff_io32},
+ {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_2", 0444, HOSTADDR(RGF_USER_USAGE_2), doff_io32},
+ {},
+};
+
+/* static parameters */
+static const struct dbg_off dbg_statics[] = {
+ {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
+ {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
+ {"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
+ {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
+ {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
+ {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
+ {"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8},
+ {},
+};
+
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+ ARRAY_SIZE(dbg_wil_regs) - 1 +
+ ARRAY_SIZE(pseudo_isr_off) - 1 +
+ ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+ ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+ ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+ sizeof(struct wil_debugfs_iomem_data),
+ GFP_KERNEL);
+ if (!wil->dbg_data.data_arr) {
+ debugfs_remove_recursive(dbg);
+ wil->debug = NULL;
+ return -ENOMEM;
+ }
+
+ wil->dbg_data.iomem_data_count = 0;
+
+ wil_pmc_init(wil);
+
+ wil6210_debugfs_init_files(wil, dbg);
+ wil6210_debugfs_init_isr(wil, dbg);
+ wil6210_debugfs_init_blobs(wil, dbg);
+ wil6210_debugfs_init_offset(wil, dbg, wil, dbg_wil_off);
+ wil6210_debugfs_init_offset(wil, dbg, (void * __force)wil->csr,
+ dbg_wil_regs);
+ wil6210_debugfs_init_offset(wil, dbg, NULL, dbg_statics);
+
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ int i;
+
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+
+ kfree(wil->dbg_data.data_arr);
+ for (i = 0; i < wil->max_assoc_sta; i++)
+ kfree(wil->sta[i].tx_latency_bins);
+
+ /* free pmc memory without sending command to fw, as it will
+ * be reset on the way down anyway
+ */
+ wil_pmc_free(wil, false);
+}
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
new file mode 100644
index 000000000..29a9f17c2
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+
+static int
+wil_ethtoolops_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *cp,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ u32 tx_itr_en, tx_itr_val = 0;
+ u32 rx_itr_en, rx_itr_val = 0;
+ int ret;
+
+ mutex_lock(&wil->mutex);
+ wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ goto out;
+
+ tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
+ if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+ tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
+
+ rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
+ if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+ rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
+
+ wil_pm_runtime_put(wil);
+
+ cp->tx_coalesce_usecs = tx_itr_val;
+ cp->rx_coalesce_usecs = rx_itr_val;
+ ret = 0;
+
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
+}
+
+static int
+wil_ethtoolops_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *cp,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ int ret;
+
+ mutex_lock(&wil->mutex);
+ wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
+ cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* only @rx_coalesce_usecs and @tx_coalesce_usecs are supported;
+	 * other parameters are ignored
+ */
+
+ if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX ||
+ cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
+ goto out_bad;
+
+ wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
+ wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ goto out;
+
+ wil->txrx_ops.configure_interrupt_moderation(wil);
+
+ wil_pm_runtime_put(wil);
+ ret = 0;
+
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
+
+out_bad:
+ wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
+ print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
+ cp, sizeof(*cp), false);
+ mutex_unlock(&wil->mutex);
+ return -EINVAL;
+}
+
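+/* Only the rx-usecs/tx-usecs coalescing parameters are handled, e.g.
+ * (illustrative interface name):
+ *   ethtool -C wlan0 rx-usecs 50 tx-usecs 50
+ *   ethtool -c wlan0
+ */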
+static const struct ethtool_ops wil_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = cfg80211_get_drvinfo,
+ .get_coalesce = wil_ethtoolops_get_coalesce,
+ .set_coalesce = wil_ethtoolops_set_coalesce,
+};
+
+void wil_set_ethtoolops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &wil_ethtool_ops;
+}
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
new file mode 100644
index 000000000..6d3413a44
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include "wil6210.h"
+#include "fw.h"
+
+MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
+MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
+MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_TALYN);
+MODULE_FIRMWARE(WIL_BRD_NAME_TALYN);
+
+static
+void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(val, d++);
+}
+
+#include "fw_inc.c"
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
new file mode 100644
index 000000000..440614d61
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+#ifndef __WIL_FW_H__
+#define __WIL_FW_H__
+
+#define WIL_FW_SIGNATURE (0x36323130) /* '0126' */
+#define WIL_FW_FMT_VERSION (1) /* format version driver supports */
+
+enum wil_fw_record_type {
+ wil_fw_type_comment = 1,
+ wil_fw_type_data = 2,
+ wil_fw_type_fill = 3,
+ wil_fw_type_action = 4,
+ wil_fw_type_verify = 5,
+ wil_fw_type_file_header = 6,
+ wil_fw_type_direct_write = 7,
+ wil_fw_type_gateway_data = 8,
+ wil_fw_type_gateway_data4 = 9,
+};
+
+struct wil_fw_record_head {
+ __le16 type; /* enum wil_fw_record_type */
+ __le16 flags; /* to be defined */
+ __le32 size; /* whole record, bytes after head */
+} __packed;
+
+/* data block. write starting from @addr
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_data, data)
+ */
+struct wil_fw_record_data { /* type == wil_fw_type_data */
+ __le32 addr;
+ __le32 data[]; /* [data_size], see above */
+} __packed;
+
+/* fill with constant @value, @size bytes starting from @addr */
+struct wil_fw_record_fill { /* type == wil_fw_type_fill */
+ __le32 addr;
+ __le32 value;
+ __le32 size;
+} __packed;
+
+/* free-form comment
+ * for informational purposes; data_size is @head.size from the record header
+ */
+struct wil_fw_record_comment { /* type == wil_fw_type_comment */
+ u8 data[0]; /* free-form data [data_size], see above */
+} __packed;
+
+/* Comment header - common for all comment record types */
+struct wil_fw_record_comment_hdr {
+ __le32 magic;
+};
+
+/* FW capabilities encoded inside a comment record */
+#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
+struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
+ /* identifies capabilities record */
+ struct wil_fw_record_comment_hdr hdr;
+ /* capabilities (variable size), see enum wmi_fw_capability */
+ u8 capabilities[];
+} __packed;
+
+/* FW VIF concurrency encoded inside a comment record
+ * Format is similar to wiphy->iface_combinations
+ */
+#define WIL_FW_CONCURRENCY_MAGIC (0xfedccdef)
+#define WIL_FW_CONCURRENCY_REC_VER 1
+struct wil_fw_concurrency_limit {
+ __le16 max; /* maximum number of interfaces of these types */
+ __le16 types; /* interface types (bit mask of enum nl80211_iftype) */
+} __packed;
+
+struct wil_fw_concurrency_combo {
+ u8 n_limits; /* number of wil_fw_concurrency_limit entries */
+ u8 max_interfaces; /* max number of concurrent interfaces allowed */
+ u8 n_diff_channels; /* total number of different channels allowed */
+ u8 same_bi; /* for APs, 1 if all APs must have same BI */
+ /* keep last - concurrency limits, variable size by n_limits */
+ struct wil_fw_concurrency_limit limits[];
+} __packed;
+
+struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
+ /* identifies concurrency record */
+ __le32 magic;
+ /* structure version, currently always 1 */
+ u8 version;
+ /* maximum number of supported MIDs _in addition_ to MID 0 */
+ u8 n_mids;
+ /* number of concurrency combinations that follow */
+ __le16 n_combos;
+ /* keep last - combinations, variable size by n_combos */
+ struct wil_fw_concurrency_combo combos[];
+} __packed;
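+
+/* Layout note (mirrors fw_handle_concurrency() parsing): each of the
+ * n_combos combinations is a wil_fw_concurrency_combo immediately followed
+ * by its n_limits wil_fw_concurrency_limit entries; the next combination
+ * starts right after the last limit.
+ */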
+
+/* brd file info encoded inside a comment record */
+#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+
+struct brd_info {
+ __le32 base_addr;
+ __le32 max_size_bytes;
+} __packed;
+
+struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
+ /* identifies brd file record */
+ struct wil_fw_record_comment_hdr hdr;
+ __le32 version;
+ struct brd_info brd_info[];
+} __packed;
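+
+/* Each brd_info[] entry describes a candidate board-data region (device base
+ * address and maximum size in bytes); entries are consumed in order by
+ * fw_handle_brd_file(), and a zero base_addr terminates the list.
+ */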
+
+/* perform action
+ * data_size = @head.size - offsetof(struct wil_fw_record_action, data)
+ */
+struct wil_fw_record_action { /* type == wil_fw_type_action */
+ __le32 action; /* action to perform: reset, wait for fw ready etc. */
+ __le32 data[]; /* action specific, [data_size], see above */
+} __packed;
+
+/* data block for struct wil_fw_record_direct_write */
+struct wil_fw_data_dwrite {
+ __le32 addr;
+ __le32 value;
+ __le32 mask;
+} __packed;
+
+/* write @value to @addr,
+ * preserving original bits according to @mask;
+ * data_size is @head.size where @head is the record header
+ */
+struct wil_fw_record_direct_write { /* type == wil_fw_type_direct_write */
+ struct wil_fw_data_dwrite data[0];
+} __packed;
+
+/* verify condition: [@addr] & @mask == @value
+ * if condition not met, firmware download fails
+ */
+struct wil_fw_record_verify { /* type == wil_fw_verify */
+ __le32 addr; /* read from this address */
+ __le32 value; /* reference value */
+ __le32 mask; /* mask for verification */
+} __packed;
+
+/* file header
+ * First record of every file
+ */
+/* the FW version prefix in the comment */
+#define WIL_FW_VERSION_PREFIX "FW version: "
+#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
+struct wil_fw_record_file_header {
+	__le32 signature; /* Wilocity signature */
+ __le32 reserved;
+ __le32 crc; /* crc32 of the following data */
+ __le32 version; /* format version */
+ __le32 data_len; /* total data in file, including this record */
+ u8 comment[32]; /* short description */
+} __packed;
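+
+/* Note: the crc field holds the CRC-32 of the first data_len bytes of the
+ * file, computed with the crc field itself taken as zero; see wil_fw_verify().
+ */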
+
+/* 1-dword gateway */
+/* data block for the struct wil_fw_record_gateway_data */
+struct wil_fw_data_gw {
+ __le32 addr;
+ __le32 value;
+} __packed;
+
+/* gateway write block.
+ * write starting address and values from the data buffer
+ * through the gateway
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data, data)
+ */
+struct wil_fw_record_gateway_data { /* type == wil_fw_type_gateway_data */
+ __le32 gateway_addr_addr;
+ __le32 gateway_value_addr;
+ __le32 gateway_cmd_addr;
+ __le32 gateway_ctrl_address;
+#define WIL_FW_GW_CTL_BUSY BIT(29) /* gateway busy performing operation */
+#define WIL_FW_GW_CTL_RUN BIT(30) /* start gateway operation */
+ __le32 command;
+ struct wil_fw_data_gw data[]; /* total size [data_size], see above */
+} __packed;
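+
+/* For each data[] entry the loader writes value to [gateway_value_addr],
+ * addr to [gateway_addr_addr] and command to [gateway_cmd_addr], then sets
+ * WIL_FW_GW_CTL_RUN in [gateway_ctrl_address] and polls until
+ * WIL_FW_GW_CTL_BUSY clears; see gw_write() and fw_handle_gateway_data().
+ */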
+
+/* 4-dword gateway */
+/* data block for the struct wil_fw_record_gateway_data4 */
+struct wil_fw_data_gw4 {
+ __le32 addr;
+ __le32 value[4];
+} __packed;
+
+/* gateway write block.
+ * write starting address and values from the data buffer
+ * through the gateway
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data4, data)
+ */
+struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */
+ __le32 gateway_addr_addr;
+ __le32 gateway_value_addr[4];
+ __le32 gateway_cmd_addr;
+ __le32 gateway_ctrl_address; /* same logic as for 1-dword gw */
+ __le32 command;
+ struct wil_fw_data_gw4 data[]; /* total size [data_size], see above */
+} __packed;
+
+#endif /* __WIL_FW_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
new file mode 100644
index 000000000..fbc84c034
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+/* Algorithmic part of the firmware download.
+ * To be included in the container file providing framework
+ */
+
+#define wil_err_fw(wil, fmt, arg...) wil_err(wil, "ERR[ FW ]" fmt, ##arg)
+#define wil_dbg_fw(wil, fmt, arg...) wil_dbg(wil, "DBG[ FW ]" fmt, ##arg)
+#define wil_hex_dump_fw(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[ FW ]" prefix_str, \
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+ void __iomem **ioaddr, __le32 val,
+ u32 size, const char *msg)
+{
+ *ioaddr = wmi_buffer_block(wil, val, size);
+ if (!(*ioaddr)) {
+ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+ return false;
+ }
+ return true;
+}
+
+/**
+ * wil_fw_verify - verify firmware file validity
+ *
+ * Perform various checks on the firmware file header.
+ * Records are not validated.
+ *
+ * Return file size or negative error
+ */
+static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
+{
+ const struct wil_fw_record_head *hdr = (const void *)data;
+ struct wil_fw_record_file_header fh;
+ const struct wil_fw_record_file_header *fh_;
+ u32 crc;
+ u32 dlen;
+
+ if (size % 4) {
+ wil_err_fw(wil, "image size not aligned: %zu\n", size);
+ return -EINVAL;
+ }
+ /* have enough data for the file header? */
+ if (size < sizeof(*hdr) + sizeof(fh)) {
+ wil_err_fw(wil, "file too short: %zu bytes\n", size);
+ return -EINVAL;
+ }
+
+ /* start with the file header? */
+ if (le16_to_cpu(hdr->type) != wil_fw_type_file_header) {
+ wil_err_fw(wil, "no file header\n");
+ return -EINVAL;
+ }
+
+ /* data_len */
+ fh_ = (struct wil_fw_record_file_header *)&hdr[1];
+ dlen = le32_to_cpu(fh_->data_len);
+ if (dlen % 4) {
+ wil_err_fw(wil, "data length not aligned: %lu\n", (ulong)dlen);
+ return -EINVAL;
+ }
+ if (size < dlen) {
+ wil_err_fw(wil, "file truncated at %zu/%lu\n",
+ size, (ulong)dlen);
+ return -EINVAL;
+ }
+ if (dlen < sizeof(*hdr) + sizeof(fh)) {
+ wil_err_fw(wil, "data length too short: %lu\n", (ulong)dlen);
+ return -EINVAL;
+ }
+
+ /* signature */
+ if (le32_to_cpu(fh_->signature) != WIL_FW_SIGNATURE) {
+ wil_err_fw(wil, "bad header signature: 0x%08x\n",
+ le32_to_cpu(fh_->signature));
+ return -EINVAL;
+ }
+
+ /* version */
+ if (le32_to_cpu(fh_->version) > WIL_FW_FMT_VERSION) {
+ wil_err_fw(wil, "unsupported header version: %d\n",
+ le32_to_cpu(fh_->version));
+ return -EINVAL;
+ }
+
+	/* checksum. ~crc32(~0, data, size) when fh.crc is set to 0 */
+ fh = *fh_;
+ fh.crc = 0;
+
+ crc = crc32_le(~0, (unsigned char const *)hdr, sizeof(*hdr));
+ crc = crc32_le(crc, (unsigned char const *)&fh, sizeof(fh));
+ crc = crc32_le(crc, (unsigned char const *)&fh_[1],
+ dlen - sizeof(*hdr) - sizeof(fh));
+ crc = ~crc;
+
+ if (crc != le32_to_cpu(fh_->crc)) {
+ wil_err_fw(wil, "checksum mismatch:"
+ " calculated for %lu bytes 0x%08x != 0x%08x\n",
+ (ulong)dlen, crc, le32_to_cpu(fh_->crc));
+ return -EINVAL;
+ }
+
+ return (int)dlen;
+}
+
+static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ return 0;
+}
+
+static int
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_capabilities *rec = data;
+ size_t capa_size;
+
+ if (size < sizeof(*rec)) {
+ wil_err_fw(wil, "capabilities record too short: %zu\n", size);
+ /* let the FW load anyway */
+ return 0;
+ }
+
+ capa_size = size - offsetof(struct wil_fw_record_capabilities,
+ capabilities);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ memcpy(wil->fw_capabilities, rec->capabilities,
+ min_t(size_t, sizeof(wil->fw_capabilities), capa_size));
+ wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
+ rec->capabilities, capa_size, false);
+ return 0;
+}
+
+static int
+fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_brd_file *rec = data;
+ u32 max_num_ent, i, ent_size;
+
+ if (size <= offsetof(struct wil_fw_record_brd_file, brd_info)) {
+ wil_err(wil, "board record too short, size %zu\n", size);
+ return -EINVAL;
+ }
+
+ ent_size = size - offsetof(struct wil_fw_record_brd_file, brd_info);
+ max_num_ent = ent_size / sizeof(struct brd_info);
+
+ if (!max_num_ent) {
+ wil_err(wil, "brd info entries are missing\n");
+ return -EINVAL;
+ }
+
+ wil->brd_info = kcalloc(max_num_ent, sizeof(struct wil_brd_info),
+ GFP_KERNEL);
+ if (!wil->brd_info)
+ return -ENOMEM;
+
+ for (i = 0; i < max_num_ent; i++) {
+ wil->brd_info[i].file_addr =
+ le32_to_cpu(rec->brd_info[i].base_addr);
+ wil->brd_info[i].file_max_size =
+ le32_to_cpu(rec->brd_info[i].max_size_bytes);
+
+ if (!wil->brd_info[i].file_addr)
+ break;
+
+ wil_dbg_fw(wil,
+ "brd info %d: file_addr 0x%x, file_max_size %d\n",
+ i, wil->brd_info[i].file_addr,
+ wil->brd_info[i].file_max_size);
+ }
+
+ wil->num_of_brd_entries = i;
+ if (wil->num_of_brd_entries == 0) {
+ kfree(wil->brd_info);
+ wil->brd_info = NULL;
+ wil_dbg_fw(wil,
+ "no valid brd info entries, using brd file addr\n");
+
+ } else {
+ wil_dbg_fw(wil, "num of brd info entries %d\n",
+ wil->num_of_brd_entries);
+ }
+
+ return 0;
+}
+
+static int
+fw_handle_concurrency(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_concurrency *rec = data;
+ const struct wil_fw_concurrency_combo *combo;
+ const struct wil_fw_concurrency_limit *limit;
+ size_t remain, lsize;
+ int i, n_combos;
+
+ if (size < sizeof(*rec)) {
+ wil_err_fw(wil, "concurrency record too short: %zu\n", size);
+ /* continue, let the FW load anyway */
+ return 0;
+ }
+
+ n_combos = le16_to_cpu(rec->n_combos);
+ remain = size - offsetof(struct wil_fw_record_concurrency, combos);
+ combo = rec->combos;
+ for (i = 0; i < n_combos; i++) {
+ if (remain < sizeof(*combo))
+ goto out_short;
+ remain -= sizeof(*combo);
+ limit = combo->limits;
+ lsize = combo->n_limits * sizeof(*limit);
+ if (remain < lsize)
+ goto out_short;
+ remain -= lsize;
+ limit += combo->n_limits;
+ combo = (struct wil_fw_concurrency_combo *)limit;
+ }
+
+ return wil_cfg80211_iface_combinations_from_fw(wil, rec);
+out_short:
+ wil_err_fw(wil, "concurrency record truncated\n");
+ return 0;
+}
+
+static int
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_comment_hdr *hdr = data;
+ u32 magic;
+ int rc = 0;
+
+ if (size < sizeof(*hdr))
+ return 0;
+
+ magic = le32_to_cpu(hdr->magic);
+
+ switch (magic) {
+ case WIL_FW_CAPABILITIES_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
+ rc = fw_handle_capabilities(wil, data, size);
+ break;
+ case WIL_BRD_FILE_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
+ rc = fw_handle_brd_file(wil, data, size);
+ break;
+ case WIL_FW_CONCURRENCY_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_FW_CONCURRENCY_MAGIC\n");
+ rc = fw_handle_concurrency(wil, data, size);
+ break;
+ default:
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
+ }
+
+ return rc;
+}
+
+static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size, __le32 addr)
+{
+ const struct wil_fw_record_data *d = data;
+ void __iomem *dst;
+ size_t s = size - sizeof(*d);
+
+ if (size < sizeof(*d) + sizeof(u32)) {
+ wil_err_fw(wil, "data record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
+ return -EINVAL;
+ wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
+ wil_memcpy_toio_32(dst, d->data, s);
+ wmb(); /* finish before processing next record */
+
+ return 0;
+}
+
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_data *d = data;
+
+ return __fw_handle_data(wil, data, size, d->addr);
+}
+
+static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_fill *d = data;
+ void __iomem *dst;
+ u32 v;
+ size_t s = (size_t)le32_to_cpu(d->size);
+
+ if (size != sizeof(*d)) {
+ wil_err_fw(wil, "bad size for fill record: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if (s < sizeof(u32)) {
+ wil_err_fw(wil, "fill size too short: %zu\n", s);
+ return -EINVAL;
+ }
+
+ if (s % sizeof(u32)) {
+ wil_err_fw(wil, "fill size not aligned: %zu\n", s);
+ return -EINVAL;
+ }
+
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
+
+ v = le32_to_cpu(d->value);
+ wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
+ le32_to_cpu(d->addr), v, s);
+ wil_memset_toio_32(dst, v, s);
+ wmb(); /* finish before processing next record */
+
+ return 0;
+}
+
+static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_file_header *d = data;
+
+ if (size != sizeof(*d)) {
+ wil_err_fw(wil, "file header length incorrect: %zu\n", size);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil, "new file, ver. %d, %i bytes\n",
+ d->version, d->data_len);
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
+ sizeof(d->comment), true);
+
+ if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
+ WIL_FW_VERSION_PREFIX_LEN))
+ memcpy(wil->fw_version,
+ d->comment + WIL_FW_VERSION_PREFIX_LEN,
+ min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
+ sizeof(wil->fw_version) - 1));
+
+ return 0;
+}
+
+static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_direct_write *d = data;
+ const struct wil_fw_data_dwrite *block = d->data;
+ int n, i;
+
+ if (size % sizeof(*block)) {
+ wil_err_fw(wil, "record size not aligned on %zu: %zu\n",
+ sizeof(*block), size);
+ return -EINVAL;
+ }
+ n = size / sizeof(*block);
+
+ for (i = 0; i < n; i++) {
+ void __iomem *dst;
+ u32 m = le32_to_cpu(block[i].mask);
+ u32 v = le32_to_cpu(block[i].value);
+ u32 x, y;
+
+ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+ return -EINVAL;
+
+ x = readl(dst);
+ y = (x & m) | (v & ~m);
+ wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
+ "(old 0x%08x val 0x%08x mask 0x%08x)\n",
+ le32_to_cpu(block[i].addr), y, x, v, m);
+ writel(y, dst);
+ wmb(); /* finish before processing next record */
+ }
+
+ return 0;
+}
+
+static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
+ void __iomem *gwa_cmd, void __iomem *gwa_ctl, u32 gw_cmd,
+ u32 a)
+{
+ unsigned delay = 0;
+
+ writel(a, gwa_addr);
+ writel(gw_cmd, gwa_cmd);
+ wmb(); /* finish before activate gw */
+
+ writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+ do {
+ udelay(1); /* typical time is few usec */
+ if (delay++ > 100) {
+ wil_err_fw(wil, "gw timeout\n");
+ return -EINVAL;
+ }
+ } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+
+ return 0;
+}
+
+static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_gateway_data *d = data;
+ const struct wil_fw_data_gw *block = d->data;
+ void __iomem *gwa_addr;
+ void __iomem *gwa_val;
+ void __iomem *gwa_cmd;
+ void __iomem *gwa_ctl;
+ u32 gw_cmd;
+ int n, i;
+
+ if (size < sizeof(*d) + sizeof(*block)) {
+ wil_err_fw(wil, "gateway record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if ((size - sizeof(*d)) % sizeof(*block)) {
+ wil_err_fw(wil, "gateway record data size"
+ " not aligned on %zu: %zu\n",
+ sizeof(*block), size - sizeof(*d));
+ return -EINVAL;
+ }
+ n = (size - sizeof(*d)) / sizeof(*block);
+
+ gw_cmd = le32_to_cpu(d->command);
+
+ wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
+ n, gw_cmd);
+
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr") ||
+ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+ "gateway_value_addr") ||
+ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
+
+ wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
+ " cmd 0x%08x ctl 0x%08x\n",
+ le32_to_cpu(d->gateway_addr_addr),
+ le32_to_cpu(d->gateway_value_addr),
+ le32_to_cpu(d->gateway_cmd_addr),
+ le32_to_cpu(d->gateway_ctrl_address));
+
+ for (i = 0; i < n; i++) {
+ int rc;
+ u32 a = le32_to_cpu(block[i].addr);
+ u32 v = le32_to_cpu(block[i].value);
+
+ wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
+ i, a, v);
+
+ writel(v, gwa_val);
+ rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_gateway_data4 *d = data;
+ const struct wil_fw_data_gw4 *block = d->data;
+ void __iomem *gwa_addr;
+ void __iomem *gwa_val[ARRAY_SIZE(block->value)];
+ void __iomem *gwa_cmd;
+ void __iomem *gwa_ctl;
+ u32 gw_cmd;
+ int n, i, k;
+
+ if (size < sizeof(*d) + sizeof(*block)) {
+ wil_err_fw(wil, "gateway4 record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if ((size - sizeof(*d)) % sizeof(*block)) {
+ wil_err_fw(wil, "gateway4 record data size"
+ " not aligned on %zu: %zu\n",
+ sizeof(*block), size - sizeof(*d));
+ return -EINVAL;
+ }
+ n = (size - sizeof(*d)) / sizeof(*block);
+
+ gw_cmd = le32_to_cpu(d->command);
+
+ wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
+ n, gw_cmd);
+
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr"))
+ return -EINVAL;
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ if (!wil_fw_addr_check(wil, &gwa_val[k],
+ d->gateway_value_addr[k],
+ 0, "gateway_value_addr"))
+ return -EINVAL;
+ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
+
+ wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
+ le32_to_cpu(d->gateway_addr_addr),
+ le32_to_cpu(d->gateway_cmd_addr),
+ le32_to_cpu(d->gateway_ctrl_address));
+ wil_hex_dump_fw("val addresses: ", DUMP_PREFIX_NONE, 16, 4,
+ d->gateway_value_addr, sizeof(d->gateway_value_addr),
+ false);
+
+ for (i = 0; i < n; i++) {
+ int rc;
+ u32 a = le32_to_cpu(block[i].addr);
+ u32 v[ARRAY_SIZE(block->value)];
+
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ v[k] = le32_to_cpu(block[i].value[k]);
+
+ wil_dbg_fw(wil, " gw4 write[%3d] [0x%08x] <==\n", i, a);
+ wil_hex_dump_fw(" val ", DUMP_PREFIX_NONE, 16, 4, v,
+ sizeof(v), false);
+
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ writel(v[k], gwa_val[k]);
+ rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
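+/* Dispatch table: load_handler runs when actually downloading FW to the
+ * device, parse_handler when only scanning the image for capabilities;
+ * record types without an entry are rejected by wil_fw_handle_record().
+ */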
+static const struct {
+ int type;
+ int (*load_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
+ int (*parse_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
+} wil_fw_handlers[] = {
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
+ {wil_fw_type_data, fw_handle_data, fw_ignore_section},
+ {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
+ /* wil_fw_type_action */
+ /* wil_fw_type_verify */
+ {wil_fw_type_file_header, fw_handle_file_header,
+ fw_handle_file_header},
+ {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
+ {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
+ {wil_fw_type_gateway_data4, fw_handle_gateway_data4,
+ fw_ignore_section},
+};
+
+static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
+ const void *data, size_t size, bool load)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
+ if (wil_fw_handlers[i].type == type)
+ return load ?
+ wil_fw_handlers[i].load_handler(
+ wil, data, size) :
+ wil_fw_handlers[i].parse_handler(
+ wil, data, size);
+
+ wil_err_fw(wil, "unknown record type: %d\n", type);
+ return -EINVAL;
+}
+
+/**
+ * wil_fw_process - process section from FW file
+ * If load is true, load the FW and uCode code and data to the
+ * corresponding device memory regions;
+ * otherwise only parse and look for capabilities
+ *
+ * Return error code
+ */
+static int wil_fw_process(struct wil6210_priv *wil, const void *data,
+ size_t size, bool load)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr;
+ size_t s, hdr_sz;
+
+ for (hdr = data;; hdr = (const void *)hdr + s, size -= s) {
+ if (size < sizeof(*hdr))
+ break;
+ hdr_sz = le32_to_cpu(hdr->size);
+ s = sizeof(*hdr) + hdr_sz;
+ if (s > size)
+ break;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
+ &hdr[1], hdr_sz, load);
+ if (rc)
+ return rc;
+ }
+ if (size) {
+ wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
+ if (size >= sizeof(*hdr)) {
+ wil_err_fw(wil, "Stop at offset %ld"
+ " record type %d [%zd bytes]\n",
+ (long)((const void *)hdr - data),
+ le16_to_cpu(hdr->type), hdr_sz);
+ }
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * wil_request_firmware - Request firmware
+ *
+ * Request firmware image from the file
+ * If load is true, load firmware to device, otherwise
+ * only parse and extract capabilities
+ *
+ * Return error code
+ */
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load)
+{
+ int rc, rc1;
+ const struct firmware *fw;
+ size_t sz;
+ const void *d;
+
+ rc = request_firmware(&fw, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
+
+ /* re-initialize board info params */
+ wil->num_of_brd_entries = 0;
+ kfree(wil->brd_info);
+ wil->brd_info = NULL;
+
+ for (sz = fw->size, d = fw->data; sz; sz -= rc1, d += rc1) {
+ rc1 = wil_fw_verify(wil, d, sz);
+ if (rc1 < 0) {
+ rc = rc1;
+ goto out;
+ }
+ rc = wil_fw_process(wil, d, rc1, load);
+ if (rc < 0)
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
+ return rc;
+}
+
+/**
+ * wil_brd_process - process section from BRD file
+ *
+ * Return error code
+ */
+static int wil_brd_process(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr = data;
+ size_t s, hdr_sz = 0;
+ u16 type;
+ int i = 0;
+
+ /* Assuming the board file includes only one file header
+ * and one or several data records.
+ * Each record starts with wil_fw_record_head.
+ */
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ s = sizeof(*hdr) + le32_to_cpu(hdr->size);
+ if (s > size)
+ return -EINVAL;
+
+ /* Skip the header record and handle the data records */
+ size -= s;
+
+ for (hdr = data + s;; hdr = (const void *)hdr + s, size -= s, i++) {
+ if (size < sizeof(*hdr))
+ break;
+
+ if (i >= wil->num_of_brd_entries) {
+ wil_err_fw(wil,
+ "Too many brd records: %d, num of expected entries %d\n",
+ i, wil->num_of_brd_entries);
+ break;
+ }
+
+ hdr_sz = le32_to_cpu(hdr->size);
+ s = sizeof(*hdr) + hdr_sz;
+ if (wil->brd_info[i].file_max_size &&
+ hdr_sz > wil->brd_info[i].file_max_size)
+ return -EINVAL;
+ if (sizeof(*hdr) + hdr_sz > size)
+ return -EINVAL;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ type = le16_to_cpu(hdr->type);
+ if (type != wil_fw_type_data) {
+ wil_err_fw(wil,
+ "invalid record type for board file: %d\n",
+ type);
+ return -EINVAL;
+ }
+ if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+ wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil,
+ "using info from fw file for record %d: addr[0x%08x], max size %d\n",
+ i, wil->brd_info[i].file_addr,
+ wil->brd_info[i].file_max_size);
+
+ rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+ cpu_to_le32(wil->brd_info[i].file_addr));
+ if (rc)
+ return rc;
+ }
+
+ if (size) {
+ wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
+ if (size >= sizeof(*hdr)) {
+ wil_err_fw(wil,
+ "Stop at offset %ld record type %d [%zd bytes]\n",
+ (long)((const void *)hdr - data),
+ le16_to_cpu(hdr->type), hdr_sz);
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * wil_request_board - Request board file
+ *
+ * Request the board image file.
+ * The board file address and max size are read from the FW file
+ * during initialization.
+ * The brd file shall include one header and one or more data sections.
+ *
+ * Return error code
+ */
+int wil_request_board(struct wil6210_priv *wil, const char *name)
+{
+ int rc, dlen;
+ const struct firmware *brd;
+
+ rc = request_firmware(&brd, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load brd %s\n", name);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
+
+ /* Verify the header */
+ dlen = wil_fw_verify(wil, brd->data, brd->size);
+ if (dlen < 0) {
+ rc = dlen;
+ goto out;
+ }
+
+ /* Process the data records */
+ rc = wil_brd_process(wil, brd->data, dlen);
+
+out:
+ release_firmware(brd);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
+ return rc;
+}
+
+/**
+ * wil_fw_verify_file_exists - checks if firmware file exists
+ *
+ * @wil: driver context
+ * @name: firmware file name
+ *
+ * return value - boolean, true for success, false for failure
+ */
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
+{
+ const struct firmware *fw;
+ int rc;
+
+ rc = request_firmware(&fw, name, wil_to_dev(wil));
+ if (!rc)
+ release_firmware(fw);
+ else
+ wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
+ return !rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000..67172385a
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+#include "trace.h"
+
+/*
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
+ * Its bits represent OR'ed bits from the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL)
+#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
+ BIT_DMA_EP_RX_ICR_RX_HTRSH)
+#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
+ (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
+#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
+#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \
+ BIT_DMA_EP_MISC_ICR_HALP)
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+#define WIL_ICR_ICC_MISC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ writel(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = readl(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
+static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
+static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
+ WIL6210_IRQ_DISABLE);
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
+{
+ wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
+ mask_halp ? "true" : "false");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
+}
+
+void wil6210_mask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "mask_halp\n");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ BIT_DMA_EP_MISC_ICR_HALP);
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "mask_irq_pseudo\n");
+
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
+
+ clear_bit(wil_status_irqen, wil->status);
+}
+
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX);
+}
+
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_TX_EDMA);
+}
+
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
+
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
+}
+
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
+{
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
+ WIL6210_IMC_RX_EDMA);
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
+{
+ wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
+ unmask_halp ? "true" : "false");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
+}
+
+static void wil6210_unmask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "unmask_halp\n");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ BIT_DMA_EP_MISC_ICR_HALP);
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "unmask_irq_pseudo\n");
+
+ set_bit(wil_status_irqen, wil->status);
+
+ wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
+}
+
+void wil_mask_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "mask_irq\n");
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_tx_edma(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_rx_edma(wil);
+ wil6210_mask_irq_misc(wil, true);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil_unmask_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "unmask_irq\n");
+
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_MISC_VALUE);
+ wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+
+ wil6210_unmask_irq_pseudo(wil);
+ if (wil->use_enhanced_dma_hw) {
+ wil6210_unmask_irq_tx_edma(wil);
+ wil6210_unmask_irq_rx_edma(wil);
+ } else {
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ }
+ wil6210_unmask_irq_misc(wil, true);
+}
+
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
+{
+ u32 moderation;
+
+ wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
+
+ wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
+
+ /* Update RX and TX moderation */
+ moderation = wil->rx_max_burst_duration |
+ (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
+ wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
+
+ /* Treat special events as regular
+ * (set bit 0 to 0x1 and clear bits 1-8)
+ */
+ wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
+ wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
+}
+
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
+
+ wil_dbg_irq(wil, "configure_interrupt_moderation\n");
+
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR)
+ return;
+
+ /* Disable and clear tx counter before (re)configuration */
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+ wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
+ wil->tx_max_burst_duration);
+ /* Configure TX max burst duration timer to use usec units */
+ wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+ BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear tx idle counter before (re)configuration */
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+ wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
+ wil->tx_interframe_timeout);
+	/* Configure TX idle counter timer to use usec units */
+ wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx counter before (re)configuration */
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+ wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
+ wil->rx_max_burst_duration);
+	/* Configure RX max burst duration timer to use usec units */
+ wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+ BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx idle counter before (re)configuration */
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+ wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
+ wil->rx_interframe_timeout);
+	/* Configure RX idle counter timer to use usec units */
+ wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr;
+ bool need_unmask = true;
+
+ wil6210_mask_irq_rx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err_ratelimited(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx(wil);
+ return IRQ_NONE;
+ }
+
+ /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
+ * moderation is not used. Interrupt moderation may cause RX
+ * buffer overflow while RX_DONE is delayed. The required
+ * action is always the same - should empty the accumulated
+ * packets from the RX ring.
+ */
+ if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
+ wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
+ isr);
+
+ isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH);
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err_ratelimited(
+ wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr;
+ bool need_unmask = true;
+
+ wil6210_mask_irq_rx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx_edma(wil);
+ return IRQ_NONE;
+ }
+
+ if (likely(isr & BIT_RX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "RX status ring\n");
+ isr &= ~BIT_RX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr;
+ bool need_unmask = true;
+
+ wil6210_mask_irq_tx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx_edma(wil);
+ return IRQ_NONE;
+ }
+
+ if (likely(isr & BIT_TX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "TX status ring\n");
+ isr &= ~BIT_TX_STATUS_IRQ;
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx status ring IRQ while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx_edma(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr;
+ bool need_unmask = true;
+
+ wil6210_mask_irq_tx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err_ratelimited(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx(wil);
+ return IRQ_NONE;
+ }
+
+ if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
+ wil_dbg_irq(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ /* clear also all VRING interrupts */
+ isr &= ~(BIT(25) - 1UL);
+ if (likely(test_bit(wil_status_fwready, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err_ratelimited(wil, "Got Tx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n",
+ isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+ struct device *dev = &wil->main_ndev->dev;
+ char *envp[3] = {
+ [0] = "SOURCE=wil6210",
+ [1] = "EVENT=FW_ERROR",
+ [2] = NULL,
+ };
+ wil_err(wil, "Notify about firmware error\n");
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+	/* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr;
+
+ wil6210_mask_irq_misc(wil, false);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_misc(isr);
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ wil6210_unmask_irq_misc(wil, false);
+ return IRQ_NONE;
+ }
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+ u32 ucode_assert_code =
+ wil_r(wil, wil->rgf_ucode_assert_code_addr);
+
+ wil_err(wil,
+ "Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
+ fw_assert_code, ucode_assert_code);
+ clear_bit(wil_status_fwready, wil->status);
+ /*
+		 * do not clear @isr here - the 2nd part is done in the thread;
+		 * there, user space gets notified, and it should be done
+		 * in a non-atomic context
+ */
+ }
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_irq(wil, "IRQ: FW ready\n");
+ wil_cache_mbox_regs(wil);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
+		/* Actual FW readiness is indicated by the
+		 * WMI_FW_READY_EVENTID event
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
+ isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
+ if (wil->halp.handle_icr) {
+ /* no need to handle HALP ICRs until next vote */
+ wil->halp.handle_icr = false;
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
+ wil6210_mask_irq_misc(wil, true);
+ complete(&wil->halp.comp);
+ }
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil, false);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ trace_wil6210_irq_misc_thread(isr);
+ wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil->recovery_state = fw_recovery_pending;
+ wil_fw_core_dump(wil);
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ if (wil->platform_ops.notify) {
+			wil_err(wil, "notify platform driver about FW crash\n");
+ wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_CRASH);
+ } else {
+ wil_fw_error_recovery(wil);
+ }
+ }
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_irq(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil, false);
+
+	/* in the non-triple-MSI case, this is done inside wil6210_thread_irq
+	 * because it has to be done after unmasking the pseudo-cause IRQ.
+ */
+ if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* thread IRQ handler */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_irq(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ if (wil->suspend_resp_rcvd) {
+ wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+ wil->suspend_resp_comp = true;
+ wake_up_interruptible(&wil->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle bug in hardware that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the issue if it happens and print all available information.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ u32 icm_rx, icr_rx, imv_rx;
+ u32 icm_tx, icr_tx, imv_tx;
+ u32 icm_misc, icr_misc, imv_misc;
+
+ if (!test_bit(wil_status_irqen, wil->status)) {
+ if (wil->use_enhanced_dma_hw) {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ } else {
+ icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+ offsetof(struct RGF_ICR, IMV));
+ }
+ icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+ offsetof(struct RGF_ICR, IMV));
+
+ /* HALP interrupt can be unmasked when misc interrupts are
+ * masked
+ */
+ if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
+ return 0;
+
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
+ if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
+ return IRQ_NONE;
+
+ /* IRQ mask debug */
+ if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
+ return IRQ_NONE;
+
+ trace_wil6210_irq_pseudo(pseudo_cause);
+ wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover real IRQ cause
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler called right here
+ * - threaded handler called later
+ *
+ * Hard IRQ handler reads and clears ISR.
+ *
+ * If threaded handler requested, hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves ISR register value
+ * for the threaded handler use.
+ *
+ * voting for wake thread - need at least 1 vote
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+	/* IRQs are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+ rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
+ WIL_NAME "_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
+ WIL_NAME "_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME "_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+/* can't use wil_ioread32_and_clear because ICC value is not set yet */
+static inline void wil_clear32(void __iomem *addr)
+{
+ u32 x = readl(addr);
+
+ writel(x, addr);
+}
+
+void wil6210_clear_irq(struct wil6210_priv *wil)
+{
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wmb(); /* make sure write completed */
+}
+
+void wil6210_set_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "set_halp\n");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
+ BIT_DMA_EP_MISC_ICR_HALP);
+}
+
+void wil6210_clear_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "clear_halp\n");
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
+ BIT_DMA_EP_MISC_ICR_HALP);
+ wil6210_unmask_halp(wil);
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
+ wil->n_msi ? "MSI" : "INTx", wil->n_msi);
+
+ if (wil->use_enhanced_dma_hw) {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
+ } else {
+ wil->txrx_ops.irq_tx = wil6210_irq_tx;
+ wil->txrx_ops.irq_rx = wil6210_irq_rx;
+ }
+
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ return rc;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil_dbg_misc(wil, "fini_irq:\n");
+
+ wil_mask_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000..94e61dbe9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,2014 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "txrx_edma.h"
+#include "wmi.h"
+#include "boot_loader.h"
+
+#define WAIT_FOR_HALP_VOTE_MS 100
+#define WAIT_FOR_SCAN_ABORT_MS 1000
+#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
+#define WIL_BOARD_FILE_MAX_NAMELEN 128
+
+bool debug_fw; /* = false; */
+module_param(debug_fw, bool, 0444);
+MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+
+static u8 oob_mode;
+module_param(oob_mode, byte, 0444);
+MODULE_PARM_DESC(oob_mode,
+ " enable out of the box (OOB) mode in FW, for diagnostics and certification");
+
+bool no_fw_recovery;
+module_param(no_fw_recovery, bool, 0644);
+MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
+
+/* if not set via modparam, will be set to default value of 1/8 of
+ * rx ring size during init flow
+ */
+unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
+module_param(rx_ring_overflow_thrsh, ushort, 0444);
+MODULE_PARM_DESC(rx_ring_overflow_thrsh,
+ " RX ring overflow threshold in descriptors.");
+
+/* We allow allocation of buffers larger than one page to support large
+ * packets. This is suboptimal performance-wise when the MTU exceeds the
+ * page size.
+ */
+unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+static int mtu_max_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+	/* sets mtu_max directly. No need to restore it in case of an
+	 * illegal value since we assume this will fail insmod
+ */
+ ret = param_set_uint(val, kp);
+ if (ret)
+ return ret;
+
+ if (mtu_max < 68 || mtu_max > WIL_MAX_ETH_MTU)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static const struct kernel_param_ops mtu_max_ops = {
+ .set = mtu_max_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
+MODULE_PARM_DESC(mtu_max, " Max MTU value.");
+
+static uint rx_ring_order;
+static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
+
+static int ring_order_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ uint x;
+
+ ret = kstrtouint(val, 0, &x);
+ if (ret)
+ return ret;
+
+ if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX))
+ return -EINVAL;
+
+ *((uint *)kp->arg) = x;
+
+ return 0;
+}
+
+static const struct kernel_param_ops ring_order_ops = {
+ .set = ring_order_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
+MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
+MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
+MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
+
+enum {
+ WIL_BOOT_ERR,
+ WIL_BOOT_VANILLA,
+ WIL_BOOT_PRODUCTION,
+ WIL_BOOT_DEVELOPMENT,
+};
+
+enum {
+ WIL_SIG_STATUS_VANILLA = 0x0,
+ WIL_SIG_STATUS_DEVELOPMENT = 0x1,
+ WIL_SIG_STATUS_PRODUCTION = 0x2,
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
+};
+
+#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
+#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
+
+#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
+
+#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
+/* round up to be above 2 ms total */
+#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from the NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will
+ * not work on 64-bit platforms - they use 64-bit transactions.
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms.
+ *
+ * To avoid a byte swap on big-endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to present
+ * the little-endian PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+ for (; count >= 4; count -= 4)
+ *d++ = __raw_readl(s++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = __raw_readl(s);
+
+ memcpy(d, &tmp, count);
+ }
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (; count >= 4; count -= 4)
+ __raw_writel(*s++, d++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = 0;
+
+ memcpy(&tmp, s, count);
+ __raw_writel(tmp, d);
+ }
+}
+
+/* Device memory access is prohibited during reset or suspend.
+ * wil_mem_access_lock protects against accessing device memory in these cases
+ */
+int wil_mem_access_lock(struct wil6210_priv *wil)
+{
+ if (!down_read_trylock(&wil->mem_lock))
+ return -EBUSY;
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ up_read(&wil->mem_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void wil_mem_access_unlock(struct wil6210_priv *wil)
+{
+ up_read(&wil->mem_lock);
+}
+
+static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct wil_ring *ring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ if (!ring->va)
+ return;
+
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
+
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->mid = U8_MAX;
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI but will
+ * not prevent the next NAPI run.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this vring
+ */
+ wmb();
+ /* make sure NAPI won't touch this vring */
+ if (test_bit(wil_status_napi_en, wil->status))
+ napi_synchronize(&wil->napi_tx);
+
+ wil->txrx_ops.ring_fini_tx(wil, ring);
+}
+
+static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
+{
+ int i;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ if (wil->sta[i].mid == mid &&
+ wil->sta[i].status == wil_sta_connected)
+ return true;
+ }
+
+ return false;
+}
+
+static void wil_disconnect_cid_complete(struct wil6210_vif *vif, int cid,
+ u16 reason_code)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ uint i;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ might_sleep();
+ wil_dbg_misc(wil,
+ "disconnect_cid_complete: CID %d, MID %d, status %d\n",
+ cid, sta->mid, sta->status);
+ /* inform upper layers */
+ if (sta->status != wil_sta_unused) {
+ if (vif->mid != sta->mid) {
+ wil_err(wil, "STA MID mismatch with VIF MID(%d)\n",
+ vif->mid);
+ }
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* AP-like interface */
+ cfg80211_del_sta(ndev, sta->addr, GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+ sta->status = wil_sta_unused;
+ sta->mid = U8_MAX;
+ }
+ /* reorder buffers */
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r;
+
+ spin_lock_bh(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+
+ spin_unlock_bh(&sta->tid_rx_lock);
+ }
+ /* crypto context */
+ memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
+ memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+ /* release vrings */
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ if (wil->ring2cid_tid[i][0] == cid)
+ wil_ring_fini_tx(wil, i);
+ }
+ /* statistics */
+ memset(&sta->stats, 0, sizeof(sta->stats));
+ sta->stats.tx_latency_min_us = U32_MAX;
+}
+
+static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
+ const u8 *bssid, u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int cid = -ENOENT;
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+
+ ndev = vif_to_ndev(vif);
+ wdev = vif_to_wdev(vif);
+
+ might_sleep();
+ wil_info(wil, "disconnect_complete: bssid=%pM, reason=%d\n",
+ bssid, reason_code);
+
+ /* Cases are:
+ * - disconnect single STA, still connected
+ * - disconnect single STA, already disconnected
+ * - disconnect all
+ *
+ * For "disconnect all", there are 3 options:
+ * - bssid == NULL
+ * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
+ * - bssid is our MAC address
+ */
+ if (bssid && !is_broadcast_ether_addr(bssid) &&
+ !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
+ cid = wil_find_cid(wil, vif->mid, bssid);
+ wil_dbg_misc(wil,
+ "Disconnect complete %pM, CID=%d, reason=%d\n",
+ bssid, cid, reason_code);
+ if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
+ wil_disconnect_cid_complete(vif, cid, reason_code);
+ } else { /* all */
+ wil_dbg_misc(wil, "Disconnect complete all\n");
+ for (cid = 0; cid < wil->max_assoc_sta; cid++)
+ wil_disconnect_cid_complete(vif, cid, reason_code);
+ }
+
+ /* link state */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_bcast_fini(vif);
+ wil_update_net_queues_bh(wil, vif, NULL, true);
+ netif_carrier_off(ndev);
+ if (!wil_has_other_active_ifaces(wil, ndev, false, true))
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
+
+ if (test_and_clear_bit(wil_vif_fwconnected, vif->status)) {
+ atomic_dec(&wil->connected_vifs);
+ cfg80211_disconnected(ndev, reason_code,
+ NULL, 0,
+ vif->locally_generated_disc,
+ GFP_KERNEL);
+ vif->locally_generated_disc = false;
+ } else if (test_bit(wil_vif_fwconnecting, vif->status)) {
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ vif->bss = NULL;
+ }
+ clear_bit(wil_vif_fwconnecting, vif->status);
+ clear_bit(wil_vif_ft_roam, vif->status);
+ vif->ptk_rekey_state = WIL_REKEY_IDLE;
+
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (!wil_vif_is_connected(wil, vif->mid)) {
+ wil_update_net_queues_bh(wil, vif, NULL, true);
+ if (test_and_clear_bit(wil_vif_fwconnected,
+ vif->status))
+ atomic_dec(&wil->connected_vifs);
+ } else {
+ wil_update_net_queues_bh(wil, vif, NULL, false);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_disconnect_cid(struct wil6210_vif *vif, int cid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ bool del_sta = false;
+
+ might_sleep();
+ wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
+ cid, sta->mid, sta->status);
+
+ if (sta->status == wil_sta_unused)
+ return 0;
+
+ if (vif->mid != sta->mid) {
+ wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid);
+ return -EINVAL;
+ }
+
+ /* inform lower layers */
+ if (wdev->iftype == NL80211_IFTYPE_AP && disable_ap_sme)
+ del_sta = true;
+
+ /* disconnect by sending the disconnect/del_sta command and waiting
+ * synchronously for the WMI_DISCONNECT_EVENTID event.
+ */
+ return wmi_disconnect_sta(vif, sta->addr, reason_code, del_sta);
+}
+
+static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil;
+ struct net_device *ndev;
+ int cid = -ENOENT;
+
+ if (unlikely(!vif))
+ return;
+
+ wil = vif_to_wil(vif);
+ ndev = vif_to_ndev(vif);
+
+ might_sleep();
+ wil_info(wil, "disconnect bssid=%pM, reason=%d\n", bssid, reason_code);
+
+ /* Cases are:
+ * - disconnect single STA, still connected
+ * - disconnect single STA, already disconnected
+ * - disconnect all
+ *
+ * For "disconnect all", there are 3 options:
+ * - bssid == NULL
+ * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
+ * - bssid is our MAC address
+ */
+ if (bssid && !is_broadcast_ether_addr(bssid) &&
+ !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
+ cid = wil_find_cid(wil, vif->mid, bssid);
+ wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
+ bssid, cid, reason_code);
+ if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
+ wil_disconnect_cid(vif, cid, reason_code);
+ } else { /* all */
+ wil_dbg_misc(wil, "Disconnect all\n");
+ for (cid = 0; cid < wil->max_assoc_sta; cid++)
+ wil_disconnect_cid(vif, cid, reason_code);
+ }
+
+ /* call event handler manually after processing wmi_call,
+ * to avoid deadlock - disconnect event handler acquires
+ * wil->mutex while it is already held here
+ */
+ _wil6210_disconnect_complete(vif, bssid, reason_code);
+}
+
+void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_vif *vif = container_of(work,
+ struct wil6210_vif, disconnect_worker);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
+
+ if (test_bit(wil_vif_fwconnected, vif->status))
+ /* connect succeeded after all */
+ return;
+
+ if (!test_bit(wil_vif_fwconnecting, vif->status))
+ /* already disconnected */
+ return;
+
+ memset(&reply, 0, sizeof(reply));
+
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0,
+ WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
+ WIL6210_DISCONNECT_TO_MS);
+ if (rc) {
+ wil_err(wil, "disconnect error %d\n", rc);
+ return;
+ }
+
+ wil_update_net_queues_bh(wil, vif, NULL, true);
+ netif_carrier_off(ndev);
+ cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
+ clear_bit(wil_vif_fwconnecting, vif->status);
+}
+
+static int wil_wait_for_recovery(struct wil6210_priv *wil)
+{
+ if (wait_event_interruptible(wil->wq, wil->recovery_state !=
+ fw_recovery_pending)) {
+ wil_err(wil, "Interrupt, canceling recovery\n");
+ return -ERESTARTSYS;
+ }
+ if (wil->recovery_state != fw_recovery_running) {
+ wil_info(wil, "Recovery cancelled\n");
+ return -EINTR;
+ }
+ wil_info(wil, "Proceed with recovery\n");
+ return 0;
+}
+
+void wil_set_recovery_state(struct wil6210_priv *wil, int state)
+{
+ wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
+ wil->recovery_state, state);
+
+ wil->recovery_state = state;
+ wake_up_interruptible(&wil->wq);
+}
+
+bool wil_is_recovery_blocked(struct wil6210_priv *wil)
+{
+ return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
+}
+
+static void wil_fw_error_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ fw_error_worker);
+ struct net_device *ndev = wil->main_ndev;
+ struct wireless_dev *wdev;
+
+ wil_dbg_misc(wil, "fw error worker\n");
+
+ if (!ndev || !(ndev->flags & IFF_UP)) {
+ wil_info(wil, "No recovery - interface is down\n");
+ return;
+ }
+ wdev = ndev->ieee80211_ptr;
+
+ /* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
+ * passed since the last recovery attempt
+ */
+ if (time_is_after_jiffies(wil->last_fw_recovery +
+ WIL6210_FW_RECOVERY_TO))
+ wil->recovery_count++;
+ else
+ wil->recovery_count = 1; /* fw was alive for a long time */
+
+ if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
+ wil_err(wil, "too many recovery attempts (%d), giving up\n",
+ wil->recovery_count);
+ return;
+ }
+
+ wil->last_fw_recovery = jiffies;
+
+ wil_info(wil, "fw error recovery requested (try %d)...\n",
+ wil->recovery_count);
+ if (!no_fw_recovery)
+ wil->recovery_state = fw_recovery_running;
+ if (wil_wait_for_recovery(wil) != 0)
+ return;
+
+ rtnl_lock();
+ mutex_lock(&wil->mutex);
+ /* Needs adaptation for multiple VIFs:
+ * need to go over all VIFs and consider the appropriate
+ * recovery because each one can have a different iftype.
+ */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_MONITOR:
+ /* silent recovery, upper layers will see disconnect */
+ __wil_down(wil);
+ __wil_up(wil);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (no_fw_recovery) /* upper layers do recovery */
+ break;
+ /* silent recovery, upper layers will see disconnect */
+ __wil_down(wil);
+ __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+ wil_cfg80211_ap_recovery(wil);
+ mutex_lock(&wil->mutex);
+ wil_info(wil, "... completed\n");
+ break;
+ default:
+ wil_err(wil, "No recovery - unknown interface type %d\n",
+ wdev->iftype);
+ break;
+ }
+
+ mutex_unlock(&wil->mutex);
+ rtnl_unlock();
+}
+
+static int wil_find_free_ring(struct wil6210_priv *wil)
+{
+ int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->ring_tx[i].va)
+ return i;
+ }
+ return -EINVAL;
+}
+
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc = -EINVAL, ringid;
+
+ if (cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ goto out;
+ }
+ ringid = wil_find_free_ring(wil);
+ if (ringid < 0) {
+ wil_err(wil, "No free vring found\n");
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
+ cid, vif->mid, ringid);
+
+ rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
+ cid, 0);
+ if (rc)
+ wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
+ cid, vif->mid, ringid);
+
+out:
+ return rc;
+}
+
+int wil_bcast_init(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int ri = vif->bcast_ring, rc;
+
+ if (ri >= 0 && wil->ring_tx[ri].va)
+ return 0;
+
+ ri = wil_find_free_ring(wil);
+ if (ri < 0)
+ return ri;
+
+ vif->bcast_ring = ri;
+ rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
+ if (rc)
+ vif->bcast_ring = -1;
+
+ return rc;
+}
+
+void wil_bcast_fini(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int ri = vif->bcast_ring;
+
+ if (ri < 0)
+ return;
+
+ vif->bcast_ring = -1;
+ wil_ring_fini_tx(wil, ri);
+}
+
+void wil_bcast_fini_all(struct wil6210_priv *wil)
+{
+ int i;
+ struct wil6210_vif *vif;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (vif)
+ wil_bcast_fini(vif);
+ }
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ uint i;
+
+ wil_dbg_misc(wil, "priv_init\n");
+
+ memset(wil->sta, 0, sizeof(wil->sta));
+ for (i = 0; i < WIL6210_MAX_CID; i++) {
+ spin_lock_init(&wil->sta[i].tid_rx_lock);
+ wil->sta[i].mid = U8_MAX;
+ }
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ spin_lock_init(&wil->ring_tx_data[i].lock);
+ wil->ring2cid_tid[i][0] = WIL6210_MAX_CID;
+ }
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->vif_mutex);
+ mutex_init(&wil->wmi_mutex);
+ mutex_init(&wil->halp.lock);
+
+ init_completion(&wil->wmi_ready);
+ init_completion(&wil->wmi_call);
+ init_completion(&wil->halp.comp);
+
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+ INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ spin_lock_init(&wil->wmi_ev_lock);
+ spin_lock_init(&wil->net_queue_lock);
+ spin_lock_init(&wil->eap_lock);
+
+ init_waitqueue_head(&wil->wq);
+ init_rwsem(&wil->mem_lock);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wq_service = create_singlethread_workqueue(WIL_NAME "_service");
+ if (!wil->wq_service)
+ goto out_wmi_wq;
+
+ wil->last_fw_recovery = jiffies;
+ wil->tx_interframe_timeout = WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->rx_interframe_timeout = WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->tx_max_burst_duration = WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT;
+ wil->rx_max_burst_duration = WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT;
+
+ if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
+ rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
+
+ wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+
+ wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
+ WMI_WAKEUP_TRIGGER_BCAST;
+ memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+ wil->ring_idle_trsh = 16;
+
+ wil->reply_mid = U8_MAX;
+ wil->max_vifs = 1;
+ wil->max_assoc_sta = max_assoc_sta;
+
+ /* edma configuration can be updated via debugfs before allocation */
+ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ /* Rx status ring size should be bigger than the number of RX buffers
+ * in order to prevent backpressure on the status ring, which may
+ * cause HW freeze.
+ */
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+ /* Number of RX buffer IDs should be bigger than the RX descriptor
+ * ring size as in HW reorder flow, the HW can consume additional
+ * buffers before releasing the previous ones.
+ */
+ wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
+
+ wil->amsdu_en = true;
+
+ return 0;
+
+out_wmi_wq:
+ destroy_workqueue(wil->wmi_wq);
+
+ return -EAGAIN;
+}
+
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
+{
+ if (wil->platform_ops.bus_request) {
+ wil->bus_request_kbps = kbps;
+ wil->platform_ops.bus_request(wil->platform_handle, kbps);
+ }
+}
+
+/**
+ * wil6210_disconnect - disconnect one connection
+ * @vif: virtual interface context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ *
+ * Disconnect and release associated resources. Issue WMI
+ * command(s) to trigger MAC disconnect. When command was issued
+ * successfully, call the wil6210_disconnect_complete function
+ * to handle the event synchronously
+ */
+void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_misc(wil, "disconnecting\n");
+
+ del_timer_sync(&vif->connect_timer);
+ _wil6210_disconnect(vif, bssid, reason_code);
+}
+
+/**
+ * wil6210_disconnect_complete - handle disconnect event
+ * @vif: virtual interface context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ *
+ * Release associated resources and indicate upper layers the
+ * connection is terminated.
+ */
+void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_misc(wil, "got disconnect\n");
+
+ del_timer_sync(&vif->connect_timer);
+ _wil6210_disconnect_complete(vif, bssid, reason_code);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "priv_deinit\n");
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ cancel_work_sync(&wil->fw_error_worker);
+ wmi_event_flush(wil);
+ destroy_workqueue(wil->wq_service);
+ destroy_workqueue(wil->wmi_wq);
+ kfree(wil->brd_info);
+}
+
+static void wil_shutdown_bl(struct wil6210_priv *wil)
+{
+ u32 val;
+
+ wil_s(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
+
+ usleep_range(100, 150);
+
+ val = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ if (val & BL_SHUTDOWN_HS_RTD) {
+ wil_dbg_misc(wil, "BL is ready for halt\n");
+ return;
+ }
+
+ wil_err(wil, "BL did not report ready for halt\n");
+}
+
+/* this format is used by ARC embedded CPU for instruction memory */
+static inline u32 ARC_me_imm32(u32 d)
+{
+ return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
+}
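+
+/* e.g. ARC_me_imm32(0x20200f80) == 0x0f802020: the two 16-bit halves of
+ * the value are swapped, matching the order in which the ARC CPU stores
+ * 32-bit values in instruction memory (see wil_freeze_bl below).
+ */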
+
+/* defines access to interrupt vectors for wil_freeze_bl */
+#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8)
+/* ARC long jump instruction */
+#define ARC_JAL_INST (0x20200f80)
+
+static void wil_freeze_bl(struct wil6210_priv *wil)
+{
+ u32 jal, upc, saved;
+ u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
+
+ jal = wil_r(wil, wil->iccm_base + ivt3);
+ if (jal != ARC_me_imm32(ARC_JAL_INST)) {
+ wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
+ return;
+ }
+
+ /* prevent the target from entering deep sleep
+ * and disabling memory access
+ */
+ saved = wil_r(wil, RGF_USER_USAGE_8);
+ wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
+ usleep_range(20, 25); /* let the BL process the bit */
+
+ /* redirect to endless loop in the INT_L1 context and let it trap */
+ wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
+ usleep_range(20, 25); /* let the BL get into the trap */
+
+ /* verify the BL is frozen */
+ upc = wil_r(wil, RGF_USER_CPU_PC);
+ if (upc < ivt3 || (upc > (ivt3 + 8)))
+ wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
+
+ wil_w(wil, RGF_USER_USAGE_8, saved);
+}
+
+static void wil_bl_prepare_halt(struct wil6210_priv *wil)
+{
+ u32 tmp, ver;
+
+ /* before halting the device CPU, the driver must make sure the BL is
+ * not accessing host memory. This is done differently depending on the
+ * BL version:
+ * 1. For very old BL versions the procedure is skipped
+ * (not supported).
+ * 2. For old BL versions we use a special trick to freeze the BL.
+ * 3. For new BL versions we shut down the BL using a handshake procedure.
+ */
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+ if (!tmp) {
+ wil_dbg_misc(wil, "old BL, skipping halt preparation\n");
+ return;
+ }
+
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
+
+ if (ver > 0)
+ wil_shutdown_bl(wil);
+ else
+ wil_freeze_bl(wil);
+}
+
+static inline void wil_halt_cpu(struct wil6210_priv *wil)
+{
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
+ BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
+ BIT_USER_MAC_CPU_MAN_RST);
+ } else {
+ wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+ }
+}
+
+static inline void wil_release_cpu(struct wil6210_priv *wil)
+{
+ /* Start CPU */
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
+ else
+ wil_w(wil, RGF_USER_USER_CPU_0, 1);
+}
+
+static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
+{
+ wil_info(wil, "oob_mode to %d\n", mode);
+ switch (mode) {
+ case 0:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
+ BIT_USER_OOB_R2_MODE);
+ break;
+ case 1:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ break;
+ case 2:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+ break;
+ default:
+ wil_err(wil, "invalid oob_mode: %d\n", mode);
+ }
+}
+
+static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
+{
+ int delay = 0;
+ u32 x, x1 = 0;
+
+ /* wait until device ready. */
+ if (no_flash) {
+ msleep(PMU_READY_DELAY_MS);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+ } else {
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
+
+ wil_dbg_misc(wil, "Reset completed in %d ms\n",
+ delay * RST_DELAY);
+ }
+
+ return 0;
+}
+
+static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
+{
+ u32 otp_hw;
+ u8 signature_status;
+ bool otp_signature_err;
+ bool hw_section_done;
+ u32 otp_qc_secured;
+ int delay = 0;
+
+ /* Wait for OTP signature test to complete */
+ usleep_range(2000, 2200);
+
+ wil->boot_config = WIL_BOOT_ERR;
+
+ /* Poll until the OTP signature status is valid.
+ * In vanilla and development modes, the HW sets
+ * BIT_OTP_SIGNATURE_ERR_TALYN_MB when the signature test is complete.
+ * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0; poll
+ * for the signature status to change to 2 or 3
+ * (WIL_SIG_STATUS_PRODUCTION or WIL_SIG_STATUS_CORRUPTED_PRODUCTION).
+ */
+ do {
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ signature_status = WIL_GET_BITS(otp_hw, 8, 9);
+ otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
+
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_VANILLA) {
+ wil->boot_config = WIL_BOOT_VANILLA;
+ break;
+ }
+ if (otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
+ wil->boot_config = WIL_BOOT_DEVELOPMENT;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status == WIL_SIG_STATUS_PRODUCTION) {
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (!otp_signature_err &&
+ signature_status ==
+ WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
+ /* Unrecognized OTP signature found, possibly a
+ * corrupted production signature. Access control
+ * is applied as in production mode, therefore
+ * do not fail.
+ */
+ wil->boot_config = WIL_BOOT_PRODUCTION;
+ break;
+ }
+ if (delay++ > OTP_HW_COUNT)
+ break;
+
+ usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
+ } while (!otp_signature_err && signature_status == 0);
+
+ if (wil->boot_config == WIL_BOOT_ERR) {
+ wil_err(wil,
+ "invalid boot config, signature_status %d otp_signature_err %d\n",
+ signature_status, otp_signature_err);
+ return -ETIME;
+ }
+
+ wil_dbg_misc(wil,
+ "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
+ delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
+
+ if (wil->boot_config == WIL_BOOT_VANILLA)
+ /* Assuming not SPI boot (currently not supported) */
+ goto out;
+
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+ delay = 0;
+
+ while (!hw_section_done) {
+ msleep(RST_DELAY);
+
+ otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
+ hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
+
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "TO waiting for hw_section_done\n");
+ return -ETIME;
+ }
+ }
+
+ wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
+
+ otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
+ wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
+ wil_dbg_misc(wil, "secured boot is %sabled\n",
+ wil->secured_boot ? "en" : "dis");
+
+out:
+ wil_dbg_misc(wil, "Reset completed\n");
+
+ return 0;
+}
+
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+{
+ u32 x;
+ int rc;
+
+ wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
+
+ if (wil->hw_version < HW_VER_TALYN) {
+ /* Clear MAC link up */
+ wil_s(wil, RGF_HP_CTRL, BIT(15));
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
+ BIT_HPAL_PERST_FROM_PAD);
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+ }
+
+ wil_halt_cpu(wil);
+
+ if (!no_flash) {
+ /* clear all boot loader "ready" bits */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready), 0);
+ /* this should be safe to write even with old BLs */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), 0);
+ }
+ /* Clear Fw Download notification */
+ wil_c(wil, RGF_USER_USAGE_6, BIT(0));
+
+ wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ /* XTAL stabilization should take about 3ms */
+ usleep_range(5000, 7000);
+ x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
+ if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+ wil_err(wil, "Xtal stabilization timeout\n"
+ "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+ return -ETIME;
+ }
+ /* switch 10k to XTAL */
+ wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ /* 40 MHz */
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+
+ if (wil->hw_version >= HW_VER_TALYN_MB) {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ } else {
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
+ }
+
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+ wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+ /* reset A2 PCIE AHB */
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+
+ wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ if (wil->hw_version == HW_VER_TALYN_MB)
+ rc = wil_wait_device_ready_talyn_mb(wil);
+ else
+ rc = wil_wait_device_ready(wil, no_flash);
+ if (rc)
+ return rc;
+
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ /* enable fix for HW bug related to the SA/DA swap in AP Rx */
+ wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+ BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+
+ if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
+ /* Reset OTP HW vectors to fit 40MHz */
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
+ wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
+ }
+
+ return 0;
+}
+
+static void wil_collect_fw_info(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 retry_short;
+ int rc;
+
+ wil_refresh_fw_capabilities(wil);
+
+ rc = wmi_get_mgmt_retry(wil, &retry_short);
+ if (!rc) {
+ wiphy->retry_short = retry_short;
+ wil_dbg_misc(wil, "FW retry_short: %d\n", retry_short);
+ }
+}
+
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ int features;
+
+ wil->keep_radio_on_during_sleep =
+ test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+ wil->platform_capa) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ else
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities))
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
+
+ if (wil->platform_ops.set_features) {
+ features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+ wil->fw_capabilities) &&
+ test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+ wil->platform_capa)) ?
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+ if (wil->n_msi == 3)
+ features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI);
+
+ wil->platform_ops.set_features(wil->platform_handle, features);
+ }
+
+ if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64,
+ wil->fw_capabilities)) {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128;
+ } else {
+ wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
+ wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
+ }
+
+ update_supported_bands(wil);
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+/* construct actual board file name to use */
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
+{
+ const char *board_file;
+ const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+
+ if (wil->board_file) {
+ board_file = wil->board_file;
+ } else {
+ /* If specific FW file is used for Talyn,
+ * use specific board file
+ */
+ if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0)
+ board_file = WIL_BRD_NAME_TALYN;
+ else
+ board_file = WIL_BOARD_FILE_NAME;
+ }
+
+ strscpy(buf, board_file, len);
+}
+
+static int wil_get_bl_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ union {
+ struct bl_dedicated_registers_v0 bl0;
+ struct bl_dedicated_registers_v1 bl1;
+ } bl;
+ u32 bl_ver;
+ u8 *mac;
+ u16 rf_status;
+
+ wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+ sizeof(bl));
+ bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+ mac = bl.bl0.mac_address;
+
+ if (bl_ver == 0) {
+ le32_to_cpus(&bl.bl0.rf_type);
+ le32_to_cpus(&bl.bl0.baseband_type);
+ rf_status = 0; /* actually, unknown */
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl0.rf_type, bl.bl0.baseband_type);
+ wil_info(wil, "Boot Loader build unknown for struct v0\n");
+ } else {
+ le16_to_cpus(&bl.bl1.rf_type);
+ rf_status = le16_to_cpu(bl.bl1.rf_status);
+ le32_to_cpus(&bl.bl1.baseband_type);
+ le16_to_cpus(&bl.bl1.bl_version_subminor);
+ le16_to_cpus(&bl.bl1.bl_version_build);
+ wil_info(wil,
+ "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+ bl_ver, mac,
+ bl.bl1.rf_type, rf_status,
+ bl.bl1.baseband_type);
+ wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+ bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+ bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "BL: Invalid MAC %pM\n", mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_set(ndev, mac);
+
+ if (rf_status) { /* bad RF cable? */
+ wil_err(wil, "RF communication error 0x%04x",
+ rf_status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+ u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+ u32 bl_ver = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+
+ if (bl_ver < 2)
+ return;
+
+ bl_assert_code = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_code));
+ bl_assert_blink = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_assert_blink));
+ bl_magic_number = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_magic_number));
+
+ if (is_err) {
+ wil_err(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ } else {
+ wil_dbg_misc(wil,
+ "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+ bl_assert_code, bl_assert_blink, bl_magic_number);
+ }
+}
+
+static int wil_get_otp_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 mac[8];
+ int mac_addr;
+
+ /* OEM MAC has precedence */
+ mac_addr = RGF_OTP_OEM_MAC;
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
+
+ if (is_valid_ether_addr(mac)) {
+ wil_info(wil, "using OEM MAC %pM\n", mac);
+ } else {
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
+ sizeof(mac));
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "Invalid MAC %pM\n", mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_set(ndev, mac);
+
+ return 0;
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(2000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+
+ if (left == 0) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ }
+
+ wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+ jiffies_to_msecs(to - left), wil->hw_version);
+ return 0;
+}
+
+void wil_abort_scan(struct wil6210_vif *vif, bool sync)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ lockdep_assert_held(&wil->vif_mutex);
+
+ if (!vif->scan_request)
+ return;
+
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n", vif->scan_request);
+ del_timer_sync(&vif->scan_timer);
+ mutex_unlock(&wil->vif_mutex);
+ rc = wmi_abort_scan(vif);
+ if (!rc && sync)
+ wait_event_interruptible_timeout(wil->wq, !vif->scan_request,
+ msecs_to_jiffies(
+ WAIT_FOR_SCAN_ABORT_MS));
+
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request) {
+ cfg80211_scan_done(vif->scan_request, &info);
+ vif->scan_request = NULL;
+ }
+}
+
+void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync)
+{
+ int i;
+
+ lockdep_assert_held(&wil->vif_mutex);
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+
+ if (vif)
+ wil_abort_scan(vif, sync);
+ }
+}
+
+int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
+{
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
+ wil_err(wil, "set_power_mgmt not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
+ if (rc)
+ wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
+ else
+ wil->ps_profile = ps_profile;
+
+ return rc;
+}
+
+static void wil_pre_fw_config(struct wil6210_priv *wil)
+{
+ wil_clear_fw_log_addr(wil);
+ /* Mark FW as loaded from host */
+ wil_s(wil, RGF_USER_USAGE_6, 1);
+
+ /* clear any interrupts which the on-card firmware
+ * may have set
+ */
+ wil6210_clear_irq(wil);
+ /* CAF_ICR - clear and mask */
+ /* it is W1C, clear by writing back same value */
+ if (wil->hw_version < HW_VER_TALYN_MB) {
+ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+ }
+ /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
+ * In Talyn-MB the host cannot access this register due to
+ * access control, hence PAL_UNIT_ICR is cleared by the FW
+ */
+ if (wil->hw_version < HW_VER_TALYN_MB)
+ wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
+ 0);
+
+ if (wil->fw_calib_result > 0) {
+ __le32 val = cpu_to_le32(wil->fw_calib_result |
+ (CALIB_RESULT_SIGNATURE << 8));
+ wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val);
+ }
+}
+
+static int wil_restore_vifs(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif;
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ int i, rc;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (!vif)
+ continue;
+ vif->ap_isolate = 0;
+ if (vif->mid) {
+ ndev = vif_to_ndev(vif);
+ wdev = vif_to_wdev(vif);
+ rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr,
+ wdev->iftype);
+ if (rc) {
+ wil_err(wil, "fail to restore VIF %d type %d, rc %d\n",
+ i, wdev->iftype, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Clear the FW and ucode log start addresses to indicate the FW log is not
+ * ready. The host driver clears the addresses before the FW starts, and the
+ * FW initializes them when it is ready to send logs.
+ */
+void wil_clear_fw_log_addr(struct wil6210_priv *wil)
+{
+ /* FW log addr */
+ wil_w(wil, RGF_USER_USAGE_1, 0);
+ /* ucode log addr */
+ wil_w(wil, RGF_USER_USAGE_2, 0);
+ wil_dbg_misc(wil, "Cleared FW and ucode log address");
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil, bool load_fw)
+{
+ int rc, i;
+ unsigned long status_flags = BIT(wil_status_resetting);
+ int no_flash;
+ struct wil6210_vif *vif;
+
+ wil_dbg_misc(wil, "reset\n");
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+ WARN_ON(test_bit(wil_status_napi_en, wil->status));
+
+ if (debug_fw) {
+ static const u8 mac[ETH_ALEN] = {
+ 0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
+ };
+ struct net_device *ndev = wil->main_ndev;
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ eth_hw_addr_set(ndev, ndev->perm_addr);
+ return 0;
+ }
+
+ if (wil->hw_version == HW_VER_UNKNOWN)
+ return -ENODEV;
+
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa) &&
+ wil->hw_version < HW_VER_TALYN_MB) {
+ wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+ }
+
+ if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+ }
+
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_PRE_RESET);
+ if (rc)
+ wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
+ rc);
+ }
+
+ set_bit(wil_status_resetting, wil->status);
+ mutex_lock(&wil->vif_mutex);
+ wil_abort_scan_all_vifs(wil, false);
+ mutex_unlock(&wil->vif_mutex);
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (vif) {
+ cancel_work_sync(&vif->disconnect_worker);
+ wil6210_disconnect(vif, NULL,
+ WLAN_REASON_DEAUTH_LEAVING);
+ vif->ptk_rekey_state = WIL_REKEY_IDLE;
+ }
+ }
+ wil_bcast_fini_all(wil);
+
+ /* Disable device LED before reset */
+ wmi_led_cfg(wil, false);
+
+ down_write(&wil->mem_lock);
+
+ /* prevent NAPI from being scheduled and prevent wmi commands */
+ mutex_lock(&wil->wmi_mutex);
+ if (test_bit(wil_status_suspending, wil->status))
+ status_flags |= BIT(wil_status_suspending);
+ bitmap_and(wil->status, wil->status, &status_flags,
+ wil_status_last);
+ wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
+ mutex_unlock(&wil->wmi_mutex);
+
+ wil_mask_irq(wil);
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wq_service);
+ flush_workqueue(wil->wmi_wq);
+
+ no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
+ if (!no_flash)
+ wil_bl_crash_info(wil, false);
+ wil_disable_irq(wil);
+ rc = wil_target_reset(wil, no_flash);
+ wil6210_clear_irq(wil);
+ wil_enable_irq(wil);
+ wil->txrx_ops.rx_fini(wil);
+ wil->txrx_ops.tx_fini(wil);
+ if (rc) {
+ if (!no_flash)
+ wil_bl_crash_info(wil, true);
+ goto out;
+ }
+
+ if (no_flash) {
+ rc = wil_get_otp_info(wil);
+ } else {
+ rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw)
+ /* ignore RF error if not going up */
+ rc = 0;
+ }
+ if (rc)
+ goto out;
+
+ wil_set_oob_mode(wil, oob_mode);
+ if (load_fw) {
+ char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
+
+ if (wil->secured_boot) {
+ wil_err(wil, "secured boot is not supported\n");
+ up_write(&wil->mem_lock);
+ return -ENOTSUPP;
+ }
+
+ board_file[0] = '\0';
+ wil_get_board_file(wil, board_file, sizeof(board_file));
+ wil_info(wil, "Use firmware <%s> + board <%s>\n",
+ wil->wil_fw_name, board_file);
+
+ if (!no_flash)
+ wil_bl_prepare_halt(wil);
+
+ wil_halt_cpu(wil);
+ memset(wil->fw_version, 0, sizeof(wil->fw_version));
+ /* Loading f/w from the file */
+ rc = wil_request_firmware(wil, wil->wil_fw_name, true);
+ if (rc)
+ goto out;
+ if (wil->num_of_brd_entries)
+ rc = wil_request_board(wil, board_file);
+ else
+ rc = wil_request_firmware(wil, board_file, true);
+ if (rc)
+ goto out;
+
+ wil_pre_fw_config(wil);
+ wil_release_cpu(wil);
+ }
+
+ /* init after reset */
+ reinit_completion(&wil->wmi_ready);
+ reinit_completion(&wil->wmi_call);
+ reinit_completion(&wil->halp.comp);
+
+ clear_bit(wil_status_resetting, wil->status);
+
+ up_write(&wil->mem_lock);
+
+ if (load_fw) {
+ wil_unmask_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+ if (rc)
+ return rc;
+
+ /* check FW is responsive */
+ rc = wmi_echo(wil);
+ if (rc) {
+ wil_err(wil, "wmi_echo failed, rc %d\n", rc);
+ return rc;
+ }
+
+ wil->txrx_ops.configure_interrupt_moderation(wil);
+
+ /* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx
+ * while there is back-pressure from Host during RX
+ */
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ wil_s(wil, RGF_DMA_MISC_CTL,
+ BIT_OFUL34_RDY_VALID_BUG_FIX_EN);
+
+ rc = wil_restore_vifs(wil);
+ if (rc) {
+ wil_err(wil, "failed to restore vifs, rc %d\n", rc);
+ return rc;
+ }
+
+ wil_collect_fw_info(wil);
+
+ if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
+ wil_ps_update(wil, wil->ps_profile);
+
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_RDY);
+ if (rc) {
+ wil_err(wil, "FW_RDY notify failed, rc %d\n",
+ rc);
+ rc = 0;
+ }
+ }
+ }
+
+ return rc;
+
+out:
+ up_write(&wil->mem_lock);
+ clear_bit(wil_status_resetting, wil->status);
+ return rc;
+}
+
+void wil_fw_error_recovery(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "starting fw error recovery\n");
+
+ if (test_bit(wil_status_resetting, wil->status)) {
+ wil_info(wil, "Reset already in progress\n");
+ return;
+ }
+
+ wil->recovery_state = fw_recovery_pending;
+ schedule_work(&wil->fw_error_worker);
+}
+
+int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ rc = wil_reset(wil, true);
+ if (rc)
+ return rc;
+
+ /* Rx RING. After MAC and beacon */
+ if (rx_ring_order == 0)
+ rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ?
+ WIL_RX_RING_SIZE_ORDER_DEFAULT :
+ WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT;
+
+ rc = wil->txrx_ops.rx_init(wil, rx_ring_order);
+ if (rc)
+ return rc;
+
+ rc = wil->txrx_ops.tx_init(wil);
+ if (rc)
+ return rc;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg_misc(wil, "type: STATION\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg_misc(wil, "type: AP\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg_misc(wil, "type: P2P_CLIENT\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg_misc(wil, "type: P2P_GO\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg_misc(wil, "type: Monitor\n");
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ wil_dbg_misc(wil, "NAPI enable\n");
+ napi_enable(&wil->napi_rx);
+ napi_enable(&wil->napi_tx);
+ set_bit(wil_status_napi_en, wil->status);
+
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "up\n");
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+int __wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ set_bit(wil_status_resetting, wil->status);
+
+ wil6210_bus_request(wil, 0);
+
+ wil_disable_irq(wil);
+ if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
+ napi_disable(&wil->napi_rx);
+ napi_disable(&wil->napi_tx);
+ wil_dbg_misc(wil, "NAPI disable\n");
+ }
+ wil_enable_irq(wil);
+
+ mutex_lock(&wil->vif_mutex);
+ wil_p2p_stop_radio_operations(wil);
+ wil_abort_scan_all_vifs(wil, false);
+ mutex_unlock(&wil->vif_mutex);
+
+ rc = wil_reset(wil, false);
+
+ return rc;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "down\n");
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
+{
+ int i;
+ int rc = -ENOENT;
+
+ for (i = 0; i < wil->max_assoc_sta; i++) {
+ if (wil->sta[i].mid == mid &&
+ wil->sta[i].status != wil_sta_unused &&
+ ether_addr_equal(wil->sta[i].addr, mac)) {
+ rc = i;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void wil_halp_vote(struct wil6210_priv *wil)
+{
+ unsigned long rc;
+ unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
+
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ return;
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
+ wil->halp.ref_cnt);
+
+ if (++wil->halp.ref_cnt == 1) {
+ reinit_completion(&wil->halp.comp);
+ /* mark for the IRQ context to handle the HALP ICR */
+ wil->halp.handle_icr = true;
+ wil6210_set_halp(wil);
+ rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
+ if (!rc) {
+ wil_err(wil, "HALP vote timed out\n");
+ /* Mask HALP as done in case the interrupt is raised */
+ wil->halp.handle_icr = false;
+ wil6210_mask_halp(wil);
+ } else {
+ wil_dbg_irq(wil,
+ "halp_vote: HALP vote completed after %d ms\n",
+ jiffies_to_msecs(to_jiffies - rc));
+ }
+ }
+
+ wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
+
+void wil_halp_unvote(struct wil6210_priv *wil)
+{
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ return;
+
+ WARN_ON(wil->halp.ref_cnt == 0);
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
+ wil->halp.ref_cnt);
+
+ if (--wil->halp.ref_cnt == 0) {
+ wil6210_clear_halp(wil);
+ wil_dbg_irq(wil, "HALP unvote\n");
+ }
+
+ wil_dbg_irq(wil, "halp_unvote: end, HALP ref_cnt (%d)\n",
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
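+
+/* Typical usage (illustrative sketch): pair the calls around accesses
+ * that require the device to stay fully awake; both are no-ops on
+ * Talyn-MB and newer hardware.
+ *
+ *	wil_halp_vote(wil);
+ *	... access registers that require HALP ...
+ *	wil_halp_unvote(wil);
+ */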
+
+void wil_init_txrx_ops(struct wil6210_priv *wil)
+{
+ if (wil->use_enhanced_dma_hw)
+ wil_init_txrx_ops_edma(wil);
+ else
+ wil_init_txrx_ops_legacy_dma(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000..ee7d7e9c2
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
+ struct net_device *ndev, bool up, bool ok)
+{
+ int i;
+ struct wil6210_vif *vif;
+ struct net_device *ndev_i;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (vif) {
+ ndev_i = vif_to_ndev(vif);
+ if (ndev_i != ndev)
+ if ((up && (ndev_i->flags & IFF_UP)) ||
+ (ok && netif_carrier_ok(ndev_i)))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool wil_has_active_ifaces(struct wil6210_priv *wil, bool up, bool ok)
+{
+ /* use NULL ndev argument to check all interfaces */
+ return wil_has_other_active_ifaces(wil, NULL, up, ok);
+}
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc = 0;
+
+ wil_dbg_misc(wil, "open\n");
+
+ if (debug_fw ||
+ test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+ wil_err(wil, "while in debug_fw or wmi_only mode\n");
+ return -EINVAL;
+ }
+
+ if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
+ wil_dbg_misc(wil, "open, first iface\n");
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0)
+ return rc;
+
+ rc = wil_up(wil);
+ if (rc)
+ wil_pm_runtime_put(wil);
+ }
+
+ return rc;
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc = 0;
+
+ wil_dbg_misc(wil, "stop\n");
+
+ if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
+ wil_dbg_misc(wil, "stop, last iface\n");
+ rc = wil_down(wil);
+ if (!rc)
+ wil_pm_runtime_put(wil);
+ }
+
+ return rc;
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
+static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle_edma(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete_done(napi, done);
+ wil6210_unmask_irq_rx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
+static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done = 0;
+ uint i;
+
+ /* always process ALL Tx completions, regardless of budget - it is fast */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct wil_ring *ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
+ struct wil6210_vif *vif;
+
+ if (!ring->va || !txdata->enabled ||
+ txdata->mid >= GET_MAX_VIFS(wil))
+ continue;
+
+ vif = wil->vifs[txdata->mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "Invalid MID %d\n", txdata->mid);
+ continue;
+ }
+
+ tx_done += wil_tx_complete(vif, i);
+ }
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
+static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done;
+ /* There is only one status TX ring */
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ if (!sring->va)
+ return 0;
+
+ tx_done = wil_tx_sring_handler(wil, sring);
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx_edma(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
+
+static void wil_dev_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->max_mtu = mtu_max;
+ dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
+}
+
+static void wil_vif_deinit(struct wil6210_vif *vif)
+{
+ del_timer_sync(&vif->scan_timer);
+ del_timer_sync(&vif->p2p.discovery_timer);
+ cancel_work_sync(&vif->disconnect_worker);
+ cancel_work_sync(&vif->p2p.discovery_expired_work);
+ cancel_work_sync(&vif->p2p.delayed_listen_work);
+ wil_probe_client_flush(vif);
+ cancel_work_sync(&vif->probe_client_worker);
+ cancel_work_sync(&vif->enable_tx_key_worker);
+}
+
+void wil_vif_free(struct wil6210_vif *vif)
+{
+ struct net_device *ndev = vif_to_ndev(vif);
+
+ wil_vif_deinit(vif);
+ free_netdev(ndev);
+}
+
+static void wil_ndev_destructor(struct net_device *ndev)
+{
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+
+ wil_vif_deinit(vif);
+}
+
+static void wil_connect_timer_fn(struct timer_list *t)
+{
+ struct wil6210_vif *vif = from_timer(vif, t, connect_timer);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ bool q;
+
+ wil_err(wil, "Connect timeout detected, disconnect station\n");
+
+ /* reschedule to thread context - disconnect cannot
+ * run from atomic context.
+ * Queue on wmi_wq to prevent a race with the connect event.
+ */
+ q = queue_work(wil->wmi_wq, &vif->disconnect_worker);
+ wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
+}
+
+static void wil_scan_timer_fn(struct timer_list *t)
+{
+ struct wil6210_vif *vif = from_timer(vif, t, scan_timer);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ clear_bit(wil_status_fwready, wil->status);
+ wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ wil_fw_error_recovery(wil);
+}
+
+static void wil_p2p_discovery_timer_fn(struct timer_list *t)
+{
+ struct wil6210_vif *vif = from_timer(vif, t, p2p.discovery_timer);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
+
+ schedule_work(&vif->p2p.discovery_expired_work);
+}
+
+static void wil_vif_init(struct wil6210_vif *vif)
+{
+ vif->bcast_ring = -1;
+
+ mutex_init(&vif->probe_client_mutex);
+
+ timer_setup(&vif->connect_timer, wil_connect_timer_fn, 0);
+ timer_setup(&vif->scan_timer, wil_scan_timer_fn, 0);
+ timer_setup(&vif->p2p.discovery_timer, wil_p2p_discovery_timer_fn, 0);
+
+ INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker);
+ INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired);
+ INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
+ INIT_WORK(&vif->enable_tx_key_worker, wil_enable_tx_key_worker);
+
+ INIT_LIST_HEAD(&vif->probe_client_pending);
+
+ vif->net_queue_stopped = 1;
+}
+
+static u8 wil_vif_find_free_mid(struct wil6210_priv *wil)
+{
+ u8 i;
+
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ if (!wil->vifs[i])
+ return i;
+ }
+
+ return U8_MAX;
+}
+
+struct wil6210_vif *
+wil_vif_alloc(struct wil6210_priv *wil, const char *name,
+ unsigned char name_assign_type, enum nl80211_iftype iftype)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_vif *vif;
+ u8 mid;
+
+ mid = wil_vif_find_free_mid(wil);
+ if (mid == U8_MAX) {
+ wil_err(wil, "no available virtual interface\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ndev = alloc_netdev(sizeof(*vif), name, name_assign_type,
+ wil_dev_setup);
+ if (!ndev) {
+ dev_err(wil_to_dev(wil), "alloc_netdev failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ if (mid == 0) {
+ wil->main_ndev = ndev;
+ } else {
+ ndev->priv_destructor = wil_ndev_destructor;
+ ndev->needs_free_netdev = true;
+ }
+
+ vif = ndev_to_vif(ndev);
+ vif->ndev = ndev;
+ vif->wil = wil;
+ vif->mid = mid;
+ wil_vif_init(vif);
+
+ wdev = &vif->wdev;
+ wdev->wiphy = wil->wiphy;
+ wdev->iftype = iftype;
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ wil_set_ethtoolops(ndev);
+ ndev->ieee80211_ptr = wdev;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GRO |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->features |= ndev->hw_features;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+ return vif;
+}
+
+void *wil_if_alloc(struct device *dev)
+{
+ struct wil6210_priv *wil;
+ struct wil6210_vif *vif;
+ int rc = 0;
+
+ wil = wil_cfg80211_init(dev);
+ if (IS_ERR(wil)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wil;
+ }
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_cfg;
+ }
+
+ wil_dbg_misc(wil, "if_alloc\n");
+
+ vif = wil_vif_alloc(wil, "wlan%d", NET_NAME_UNKNOWN,
+ NL80211_IFTYPE_STATION);
+ if (IS_ERR(vif)) {
+ dev_err(dev, "wil_vif_alloc failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ wil->radio_wdev = vif_to_wdev(vif);
+
+ return wil;
+
+out_priv:
+ wil_priv_deinit(wil);
+
+out_cfg:
+ wil_cfg80211_deinit(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil->main_ndev;
+
+ wil_dbg_misc(wil, "if_free\n");
+
+ if (!ndev)
+ return;
+
+ wil_priv_deinit(wil);
+
+ wil->main_ndev = NULL;
+ wil_ndev_destructor(ndev);
+ free_netdev(ndev);
+
+ wil_cfg80211_deinit(wil);
+}
+
+int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif)
+{
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ bool any_active = wil_has_active_ifaces(wil, true, false);
+ int rc;
+
+ ASSERT_RTNL();
+
+ if (wil->vifs[vif->mid]) {
+ dev_err(&ndev->dev, "VIF with mid %d already in use\n",
+ vif->mid);
+ return -EEXIST;
+ }
+ if (any_active && vif->mid != 0) {
+ rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr,
+ wdev->iftype);
+ if (rc)
+ return rc;
+ }
+ rc = cfg80211_register_netdevice(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ if (any_active && vif->mid != 0)
+ wmi_port_delete(wil, vif->mid);
+ return rc;
+ }
+
+ wil->vifs[vif->mid] = vif;
+ return 0;
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil->wiphy;
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+
+ wil_dbg_misc(wil, "entered");
+
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+ rc = wiphy_register(wiphy);
+ if (rc < 0) {
+ wil_err(wil, "failed to register wiphy, err %d\n", rc);
+ return rc;
+ }
+
+ init_dummy_netdev(&wil->napi_ndev);
+ if (wil->use_enhanced_dma_hw) {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx_edma);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma);
+ } else {
+ netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ wil6210_netdev_poll_rx);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx);
+ }
+
+ wil_update_net_queues_bh(wil, vif, NULL, true);
+
+ rtnl_lock();
+ wiphy_lock(wiphy);
+ rc = wil_vif_add(wil, vif);
+ wiphy_unlock(wiphy);
+ rtnl_unlock();
+ if (rc < 0)
+ goto out_wiphy;
+
+ return 0;
+
+out_wiphy:
+ wiphy_unregister(wiphy);
+ return rc;
+}
+
+void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
+{
+ struct wil6210_vif *vif;
+ struct net_device *ndev;
+ bool any_active = wil_has_active_ifaces(wil, true, false);
+
+ ASSERT_RTNL();
+ if (mid >= GET_MAX_VIFS(wil)) {
+ wil_err(wil, "invalid MID: %d\n", mid);
+ return;
+ }
+
+ vif = wil->vifs[mid];
+ if (!vif) {
+ wil_err(wil, "MID %d not registered\n", mid);
+ return;
+ }
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING);
+ mutex_unlock(&wil->mutex);
+
+ ndev = vif_to_ndev(vif);
+ /* During unregister_netdevice, cfg80211_leave may perform operations
+ * such as stopping the AP or disconnecting, so we only clear the VIF
+ * afterwards.
+ */
+ cfg80211_unregister_netdevice(ndev);
+
+ if (any_active && vif->mid != 0)
+ wmi_port_delete(wil, vif->mid);
+
+ /* make sure no one is accessing the VIF before removing */
+ mutex_lock(&wil->vif_mutex);
+ wil->vifs[mid] = NULL;
+ /* ensure NAPI code will see the NULL VIF */
+ wmb();
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ napi_synchronize(&wil->napi_rx);
+ napi_synchronize(&wil->napi_tx);
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ flush_work(&wil->wmi_event_worker);
+ del_timer_sync(&vif->connect_timer);
+ cancel_work_sync(&vif->disconnect_worker);
+ wil_probe_client_flush(vif);
+ cancel_work_sync(&vif->probe_client_worker);
+ cancel_work_sync(&vif->enable_tx_key_worker);
+ /* For secondary VIFs, the ndev will be freed by the destructor after
+ * RTNL is unlocked. The main interface will be freed in wil_if_free;
+ * we need to keep it a bit longer so the logging macros keep working.
+ */
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+
+ wil_dbg_misc(wil, "if_remove\n");
+
+ rtnl_lock();
+ wiphy_lock(wiphy);
+ wil_vif_remove(wil, 0);
+ wiphy_unlock(wiphy);
+ rtnl_unlock();
+
+ netif_napi_del(&wil->napi_tx);
+ netif_napi_del(&wil->napi_rx);
+
+ wiphy_unregister(wiphy);
+}
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644
index 000000000..f26bf046d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define P2P_WILDCARD_SSID "DIRECT-"
+#define P2P_DMG_SOCIAL_CHANNEL 2
+#define P2P_SEARCH_DURATION_MS 500
+#define P2P_DEFAULT_BI 100
+
+static int wil_p2p_start_listen(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_p2p_info *p2p = &vif->p2p;
+ u8 channel = p2p->listen_chan.hw_value;
+ int rc;
+
+ lockdep_assert_held(&wil->mutex);
+
+ rc = wmi_p2p_cfg(vif, channel, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "wmi_p2p_cfg failed\n");
+ goto out;
+ }
+
+ rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "wmi_set_ssid failed\n");
+ goto out_stop;
+ }
+
+ rc = wmi_start_listen(vif);
+ if (rc) {
+ wil_err(wil, "wmi_start_listen failed\n");
+ goto out_stop;
+ }
+
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(p2p->listen_duration));
+out_stop:
+ if (rc)
+ wmi_stop_discovery(vif);
+
+out:
+ return rc;
+}
+
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
+{
+ return (request->n_channels == 1) &&
+ (request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
+}
+
+int wil_p2p_search(struct wil6210_vif *vif,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wil_p2p_info *p2p = &vif->p2p;
+
+ wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
+
+ lockdep_assert_held(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "search failed. discovery already ongoing\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = wmi_p2p_cfg(vif, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "wmi_p2p_cfg failed\n");
+ goto out;
+ }
+
+ rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "wmi_set_ssid failed\n");
+ goto out_stop;
+ }
+
+ /* Set application IE to probe request and probe response */
+ rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
+ goto out_stop;
+ }
+
+ /* supplicant doesn't provide Probe Response IEs. As a workaround -
+ * re-use Probe Request IEs
+ */
+ rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
+ goto out_stop;
+ }
+
+ rc = wmi_start_search(vif);
+ if (rc) {
+ wil_err(wil, "wmi_start_search failed\n");
+ goto out_stop;
+ }
+
+ p2p->discovery_started = 1;
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
+
+out_stop:
+ if (rc)
+ wmi_stop_discovery(vif);
+
+out:
+ return rc;
+}
+
+int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
+ unsigned int duration, struct ieee80211_channel *chan,
+ u64 *cookie)
+{
+ struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
+ struct wil_p2p_info *p2p = &vif->p2p;
+ int rc;
+
+ if (!chan)
+ return -EINVAL;
+
+ wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
+
+ mutex_lock(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "discovery already ongoing\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+ *cookie = ++p2p->cookie;
+ p2p->listen_duration = duration;
+
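+ /* if a scan is in progress, defer the listen; it will be started
+ * from wil_p2p_delayed_listen_work() once the scan completes
+ */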
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request) {
+ wil_dbg_misc(wil, "Delaying p2p listen until scan done\n");
+ p2p->pending_listen_wdev = wdev;
+ p2p->discovery_started = 1;
+ rc = 0;
+ mutex_unlock(&wil->vif_mutex);
+ goto out;
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ rc = wil_p2p_start_listen(vif);
+ if (rc)
+ goto out;
+
+ p2p->discovery_started = 1;
+ if (vif->mid == 0)
+ wil->radio_wdev = wdev;
+
+ cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+ GFP_KERNEL);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+u8 wil_p2p_stop_discovery(struct wil6210_vif *vif)
+{
+ struct wil_p2p_info *p2p = &vif->p2p;
+ u8 started = p2p->discovery_started;
+
+ if (p2p->discovery_started) {
+ if (p2p->pending_listen_wdev) {
+ /* discovery not really started, only pending */
+ p2p->pending_listen_wdev = NULL;
+ } else {
+ del_timer_sync(&p2p->discovery_timer);
+ wmi_stop_discovery(vif);
+ }
+ p2p->discovery_started = 0;
+ }
+
+ return started;
+}
+
+int wil_p2p_cancel_listen(struct wil6210_vif *vif, u64 cookie)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_p2p_info *p2p = &vif->p2p;
+ u8 started;
+
+ mutex_lock(&wil->mutex);
+
+ if (cookie != p2p->cookie) {
+ wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ p2p->cookie, cookie);
+ mutex_unlock(&wil->mutex);
+ return -ENOENT;
+ }
+
+ started = wil_p2p_stop_discovery(vif);
+
+ mutex_unlock(&wil->mutex);
+
+ if (!started) {
+ wil_err(wil, "listen not started\n");
+ return -ENOENT;
+ }
+
+ mutex_lock(&wil->vif_mutex);
+ cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif),
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ if (vif->mid == 0)
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ mutex_unlock(&wil->vif_mutex);
+ return 0;
+}
+
+void wil_p2p_listen_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_vif *vif = container_of(p2p,
+ struct wil6210_vif, p2p);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 started;
+
+ wil_dbg_misc(wil, "p2p_listen_expired\n");
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(vif);
+ mutex_unlock(&wil->mutex);
+
+ if (!started)
+ return;
+
+ mutex_lock(&wil->vif_mutex);
+ cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif),
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ if (vif->mid == 0)
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ mutex_unlock(&wil->vif_mutex);
+}
+
+void wil_p2p_search_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_vif *vif = container_of(p2p,
+ struct wil6210_vif, p2p);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 started;
+
+ wil_dbg_misc(wil, "p2p_search_expired\n");
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(vif);
+ mutex_unlock(&wil->mutex);
+
+ if (started) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request) {
+ cfg80211_scan_done(vif->scan_request, &info);
+ vif->scan_request = NULL;
+ if (vif->mid == 0)
+ wil->radio_wdev =
+ wil->main_ndev->ieee80211_ptr;
+ }
+ mutex_unlock(&wil->vif_mutex);
+ }
+}
+
+void wil_p2p_delayed_listen_work(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, delayed_listen_work);
+ struct wil6210_vif *vif = container_of(p2p,
+ struct wil6210_vif, p2p);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+
+ mutex_lock(&wil->mutex);
+
+ wil_dbg_misc(wil, "Checking delayed p2p listen\n");
+ if (!p2p->discovery_started || !p2p->pending_listen_wdev)
+ goto out;
+
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request) {
+ /* another scan started, wait again... */
+ mutex_unlock(&wil->vif_mutex);
+ goto out;
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+ rc = wil_p2p_start_listen(vif);
+
+ mutex_lock(&wil->vif_mutex);
+ if (rc) {
+ cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ if (vif->mid == 0)
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ } else {
+ cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie,
+ &p2p->listen_chan,
+ p2p->listen_duration, GFP_KERNEL);
+ if (vif->mid == 0)
+ wil->radio_wdev = p2p->pending_listen_wdev;
+ }
+ p2p->pending_listen_wdev = NULL;
+ mutex_unlock(&wil->vif_mutex);
+
+out:
+ mutex_unlock(&wil->mutex);
+}
+
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wil_p2p_info *p2p = &vif->p2p;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ lockdep_assert_held(&wil->mutex);
+ lockdep_assert_held(&wil->vif_mutex);
+
+ if (wil->radio_wdev != wil->p2p_wdev)
+ goto out;
+
+ if (!p2p->discovery_started) {
+ /* Regular scan on the p2p device */
+ if (vif->scan_request &&
+ vif->scan_request->wdev == wil->p2p_wdev)
+ wil_abort_scan(vif, true);
+ goto out;
+ }
+
+ /* Search or listen on p2p device */
+ mutex_unlock(&wil->vif_mutex);
+ wil_p2p_stop_discovery(vif);
+ mutex_lock(&wil->vif_mutex);
+
+ if (vif->scan_request) {
+ /* search */
+ cfg80211_scan_done(vif->scan_request, &info);
+ vif->scan_request = NULL;
+ } else {
+ /* listen */
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ }
+
+out:
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000..ce40d9490
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/suspend.h>
+#include "wil6210.h"
+#include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
+
+static int n_msi = 3;
+module_param(n_msi, int, 0444);
+MODULE_PARM_DESC(n_msi, " Use MSI interrupts: 0 - use INTx, 1 - single MSI, 3 - three MSIs (default)");
+
+bool ftm_mode;
+module_param(ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
+
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused);
+
+static
+int wil_set_capabilities(struct wil6210_priv *wil)
+{
+ const char *wil_fw_name;
+ u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
+ u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
+ RGF_USER_REVISION_ID_MASK);
+ int platform_capa;
+ struct fw_map *iccm_section, *sct;
+
+ bitmap_zero(wil->hw_capa, hw_capa_last);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
+ wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
+ WIL_FW_NAME_DEFAULT;
+ wil->chip_revision = chip_revision;
+
+ switch (jtag_id) {
+ case JTAG_DEV_ID_SPARROW:
+ memcpy(fw_mapping, sparrow_fw_mapping,
+ sizeof(sparrow_fw_mapping));
+ switch (chip_revision) {
+ case REVISION_ID_SPARROW_D0:
+ wil->hw_name = "Sparrow D0";
+ wil->hw_version = HW_VER_SPARROW_D0;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
+ WIL_FW_NAME_SPARROW_PLUS;
+
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ sct = wil_find_fw_mapping("mac_rgf_ext");
+ if (!sct) {
+ wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
+ break;
+ case REVISION_ID_SPARROW_B0:
+ wil->hw_name = "Sparrow B0";
+ wil->hw_version = HW_VER_SPARROW_B0;
+ break;
+ default:
+ wil->hw_name = "Unknown";
+ wil->hw_version = HW_VER_UNKNOWN;
+ break;
+ }
+ wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
+ break;
+ case JTAG_DEV_ID_TALYN:
+ wil->hw_name = "Talyn-MA";
+ wil->hw_version = HW_VER_TALYN;
+ memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
+ BIT_NO_FLASH_INDICATION)
+ set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ break;
+ case JTAG_DEV_ID_TALYN_MB:
+ wil->hw_name = "Talyn-MB";
+ wil->hw_version = HW_VER_TALYN_MB;
+ memcpy(fw_mapping, talyn_mb_fw_mapping,
+ sizeof(talyn_mb_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ set_bit(hw_capa_no_flash, wil->hw_capa);
+ wil->use_enhanced_dma_hw = true;
+ wil->use_rx_hw_reordering = true;
+ wil->use_compressed_rx_status = true;
+ wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
+ WIL_FW_NAME_TALYN;
+ if (wil_fw_verify_file_exists(wil, wil_fw_name))
+ wil->wil_fw_name = wil_fw_name;
+ break;
+ default:
+ wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
+ jtag_id, chip_revision);
+ wil->hw_name = "Unknown";
+ wil->hw_version = HW_VER_UNKNOWN;
+ return -EINVAL;
+ }
+
+ wil_init_txrx_ops(wil);
+
+ iccm_section = wil_find_fw_mapping("fw_code");
+ if (!iccm_section) {
+ wil_err(wil, "fw_code section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ wil->iccm_base = iccm_section->host;
+
+ wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name,
+ test_bit(hw_capa_no_flash, wil->hw_capa) ? "doesn't " : "");
+
+ /* Get platform capabilities */
+ if (wil->platform_ops.get_capa) {
+ platform_capa =
+ wil->platform_ops.get_capa(wil->platform_handle);
+ memcpy(wil->platform_capa, &platform_capa,
+ min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+ }
+
+ wil_info(wil, "platform_capa 0x%lx\n", *wil->platform_capa);
+
+ /* extract FW capabilities from file without loading the FW */
+ wil_request_firmware(wil, wil->wil_fw_name, false);
+ wil_refresh_fw_capabilities(wil);
+
+ return 0;
+}
+
+void wil_disable_irq(struct wil6210_priv *wil)
+{
+ int irq = wil->pdev->irq;
+
+ disable_irq(irq);
+ if (wil->n_msi == 3) {
+ disable_irq(irq + 1);
+ disable_irq(irq + 2);
+ }
+}
+
+void wil_enable_irq(struct wil6210_priv *wil)
+{
+ int irq = wil->pdev->irq;
+
+ enable_irq(irq);
+ if (wil->n_msi == 3) {
+ enable_irq(irq + 1);
+ enable_irq(irq + 2);
+ }
+}
+
+static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif;
+ int i;
+
+ for (i = 1; i < GET_MAX_VIFS(wil); i++) {
+ vif = wil->vifs[i];
+ if (vif) {
+ wil_vif_prepare_stop(vif);
+ wil_vif_remove(wil, vif->mid);
+ }
+ }
+}
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+ /* on platforms with buggy ACPI, pdev->msi_enabled may be set to
+ * allow pci_enable_device to work. This indicates INTx was not routed
+ * and only MSI should be used
+ */
+ int msi_only = pdev->msi_enabled;
+
+ wil_dbg_misc(wil, "if_pcie_enable\n");
+
+ pci_set_master(pdev);
+
+ /* how many MSI interrupts to request? */
+ switch (n_msi) {
+ case 3:
+ case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi);
+ break;
+ case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ break;
+ default:
+ wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi);
+ n_msi = 1;
+ }
+
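+ /* fall back gracefully: 3 MSIs -> 1 MSI -> INTx */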
+ if (n_msi == 3 &&
+ pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ n_msi = 1;
+ }
+
+ if (n_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ n_msi = 0;
+ }
+
+ wil->n_msi = n_msi;
+
+ if (wil->n_msi == 0 && msi_only) {
+ wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
+ rc = -ENODEV;
+ goto stop_master;
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto release_vectors;
+
+ /* need reset here to obtain MAC */
+ mutex_lock(&wil->mutex);
+ rc = wil_reset(wil, false);
+ mutex_unlock(&wil->mutex);
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ release_vectors:
+ /* safe to call if no allocation */
+ pci_free_irq_vectors(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ wil_dbg_misc(wil, "if_pcie_disable\n");
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_platform_rop_ramdump(void *wil_handle, void *buf, uint32_t size)
+{
+ struct wil6210_priv *wil = wil_handle;
+
+ if (!wil)
+ return -EINVAL;
+
+ return wil_fw_copy_crash_dump(wil, buf, size);
+}
+
+static int wil_platform_rop_fw_recovery(void *wil_handle)
+{
+ struct wil6210_priv *wil = wil_handle;
+
+ if (!wil)
+ return -EINVAL;
+
+ wil_fw_error_recovery(wil);
+
+ return 0;
+}
+
+static void wil_platform_ops_uninit(struct wil6210_priv *wil)
+{
+ if (wil->platform_ops.uninit)
+ wil->platform_ops.uninit(wil->platform_handle);
+ memset(&wil->platform_ops, 0, sizeof(wil->platform_ops));
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ int rc;
+ const struct wil_platform_rops rops = {
+ .ramdump = wil_platform_rop_ramdump,
+ .fw_recovery = wil_platform_rop_fw_recovery,
+ };
+ u32 bar_size = pci_resource_len(pdev, 0);
+ int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
+ int i, start_idx;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME
+ " device found [%04x:%04x] (rev %x) bar size 0x%x\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
+ bar_size);
+
+ if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
+ (bar_size > WIL6210_MAX_MEM_SIZE)) {
+ dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
+ bar_size);
+ return -ENODEV;
+ }
+
+ wil = wil_if_alloc(dev);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ return rc;
+ }
+
+ wil->pdev = pdev;
+ pci_set_drvdata(pdev, wil);
+ wil->bar_size = bar_size;
+ /* rollback to if_free */
+
+ wil->platform_handle =
+ wil_platform_init(&pdev->dev, &wil->platform_ops, &rops, wil);
+ if (!wil->platform_handle) {
+ rc = -ENODEV;
+ wil_err(wil, "wil_platform_init failed\n");
+ goto if_free;
+ }
+ /* rollback to err_plat */
+ rc = pci_enable_device(pdev);
+ if (rc && pdev->msi_enabled == 0) {
+ wil_err(wil,
+ "pci_enable_device failed, retry with MSI only\n");
+ /* Workaround for platforms that can't allocate the IRQ:
+ * retry with MSI only
+ */
+ pdev->msi_enabled = 1;
+ rc = pci_enable_device(pdev);
+ }
+ if (rc) {
+ wil_err(wil,
+ "pci_enable_device failed, even with MSI only\n");
+ goto err_plat;
+ }
+ /* rollback to err_disable_pdev */
+ pci_set_power_state(pdev, PCI_D0);
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ wil_err(wil, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ wil->csr = pci_ioremap_bar(pdev, 0);
+ if (!wil->csr) {
+ wil_err(wil, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
+
+ rc = wil_set_capabilities(wil);
+ if (rc) {
+ wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
+ goto err_iounmap;
+ }
+
+ /* The device supports >32-bit addresses.
+ * For legacy DMA, start from the 48-bit mask.
+ */
+ start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
+
+ for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
+ }
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
+ }
+
+ if (wil->dma_addr_size == 0)
+ goto err_iounmap;
+
+ wil6210_clear_irq(wil);
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto err_iounmap;
+ }
+ /* rollback to bus_disable */
+
+ wil_clear_fw_log_addr(wil);
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ /* in case of WMI-only FW, perform full reset and FW loading */
+ if (test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+ wil_dbg_misc(wil, "Loading WMI only FW\n");
+ mutex_lock(&wil->mutex);
+ rc = wil_reset(wil, true);
+ mutex_unlock(&wil->mutex);
+ if (rc) {
+ wil_err(wil, "failed to load WMI only FW\n");
+ /* ignore the error to allow debugging */
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_PM))
+ wil->pm_notify.notifier_call = wil6210_pm_notify;
+
+ rc = register_pm_notifier(&wil->pm_notify);
+ if (rc)
+ /* Do not fail the driver initialization, as suspend can
+ * be prevented in a later phase if needed
+ */
+ wil_err(wil, "register_pm_notifier failed: %d\n", rc);
+
+ wil6210_debugfs_init(wil);
+
+ wil_pm_runtime_allow(wil);
+
+ return 0;
+
+bus_disable:
+ wil_if_pcie_disable(wil);
+err_iounmap:
+ pci_iounmap(pdev, wil->csr);
+err_release_reg:
+ pci_release_region(pdev, 0);
+err_disable_pdev:
+ pci_disable_device(pdev);
+err_plat:
+ wil_platform_ops_uninit(wil);
+if_free:
+ wil_if_free(wil);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ void __iomem *csr = wil->csr;
+
+ wil_dbg_misc(wil, "pcie_remove\n");
+
+ unregister_pm_notifier(&wil->pm_notify);
+
+ wil_pm_runtime_forbid(wil);
+
+ wil6210_debugfs_remove(wil);
+ rtnl_lock();
+ wiphy_lock(wil->wiphy);
+ wil_p2p_wdev_free(wil);
+ wil_remove_all_additional_vifs(wil);
+ wiphy_unlock(wil->wiphy);
+ rtnl_unlock();
+ wil_if_remove(wil);
+ wil_if_pcie_disable(wil);
+ pci_iounmap(pdev, csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ wil_platform_ops_uninit(wil);
+ wil_if_free(wil);
+}
+
+static const struct pci_device_id wil6210_pcie_ids[] = {
+ { PCI_DEVICE(0x1ae9, 0x0310) },
+ { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+ { PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ bool keep_radio_on, active_ifaces;
+
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
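+ /* NAPI contexts are attached to a dummy netdev so that a single
+ * Rx/Tx polling pair serves all VIFs on the radio
+ */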
+ mutex_lock(&wil->vif_mutex);
+ active_ifaces = wil_has_active_ifaces(wil, true, false);
+ mutex_unlock(&wil->vif_mutex);
+ keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep;
+
+ rc = wil_can_suspend(wil, is_runtime);
+ if (rc)
+ goto out;
+
+ rc = wil_suspend(wil, is_runtime, keep_radio_on);
+ if (!rc) {
+ /* In case radio stays on, platform device will control
+ * PCIe master
+ */
+ if (!keep_radio_on) {
+ /* disable bus mastering */
+ pci_clear_master(pdev);
+ wil->suspend_stats.r_off.successful_suspends++;
+ } else {
+ wil->suspend_stats.r_on.successful_suspends++;
+ }
+ }
+out:
+ return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+ int rc = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ bool keep_radio_on, active_ifaces;
+
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
+
+ mutex_lock(&wil->vif_mutex);
+ active_ifaces = wil_has_active_ifaces(wil, true, false);
+ mutex_unlock(&wil->vif_mutex);
+ keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep;
+
+ /* In case radio stays on, platform device will control
+ * PCIe master
+ */
+ if (!keep_radio_on)
+ /* allow master */
+ pci_set_master(pdev);
+ rc = wil_resume(wil, is_runtime, keep_radio_on);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d)\n", rc);
+ if (!keep_radio_on) {
+ pci_clear_master(pdev);
+ wil->suspend_stats.r_off.failed_resumes++;
+ } else {
+ wil->suspend_stats.r_on.failed_resumes++;
+ }
+ } else {
+ if (keep_radio_on)
+ wil->suspend_stats.r_on.successful_resumes++;
+ else
+ wil->suspend_stats.r_off.successful_resumes++;
+ }
+
+ return rc;
+}
+
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct wil6210_priv *wil = container_of(
+ notify_block, struct wil6210_priv, pm_notify);
+ int rc = 0;
+ enum wil_platform_event evt;
+
+ wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ break;
+ evt = WIL_PLATFORM_EVT_PRE_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ evt = WIL_PLATFORM_EVT_POST_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ default:
+ wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode);
+ break;
+ }
+
+ wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc);
+ return rc;
+}
+
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
+{
+ return wil6210_suspend(dev, false);
+}
+
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
+{
+ return wil6210_resume(dev, false);
+}
+
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+
+ wil_dbg_pm(wil, "Runtime idle\n");
+
+ return wil_can_suspend(wil, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
+{
+ return wil6210_resume(dev, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 1;
+ }
+
+ return wil6210_suspend(dev, true);
+}
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+ SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+ wil6210_pm_runtime_resume,
+ wil6210_pm_runtime_idle)
+};
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+ .driver = {
+ .pm = &wil6210_pm_ops,
+ },
+};
+
+static int __init wil6210_driver_init(void)
+{
+ int rc;
+
+ rc = wil_platform_modinit();
+ if (rc)
+ return rc;
+
+ rc = pci_register_driver(&wil6210_driver);
+ if (rc)
+ wil_platform_modexit();
+ return rc;
+}
+module_init(wil6210_driver_init);
+
+static void __exit wil6210_driver_exit(void)
+{
+ pci_unregister_driver(&wil6210_driver);
+ wil_platform_modexit();
+}
+module_exit(wil6210_driver_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644
index 000000000..f521af575
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "wil6210.h"
+#include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
+
+static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil)
+{
+ int i;
+
+ mutex_lock(&wil->vif_mutex);
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+
+ if (vif && test_bit(wil_vif_fwconnected, vif->status))
+ wil_update_net_queues_bh(wil, vif, NULL, false);
+ }
+ mutex_unlock(&wil->vif_mutex);
+}
+
+static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil)
+{
+ int i;
+
+ mutex_lock(&wil->vif_mutex);
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+
+ if (vif)
+ wil_update_net_queues_bh(wil, vif, NULL, true);
+ }
+ mutex_unlock(&wil->vif_mutex);
+}
+
+static bool
+wil_can_suspend_vif(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ bool is_runtime)
+{
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg_pm(wil, "Sniffer\n");
+ return false;
+
+ /* for STA-like interface, don't runtime suspend */
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(wil_vif_fwconnecting, vif->status)) {
+ wil_dbg_pm(wil, "Delay suspend when connecting\n");
+ return false;
+ }
+ if (is_runtime) {
+ wil_dbg_pm(wil, "STA-like interface\n");
+ return false;
+ }
+ break;
+ /* AP-like interface - can't suspend */
+ default:
+ wil_dbg_pm(wil, "AP-like interface\n");
+ return false;
+ }
+
+ return true;
+}
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+ int rc = 0, i;
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
+ bool active_ifaces;
+
+ wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
+
+ if (wmi_only || debug_fw) {
+ wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+ wmi_only ? "wmi_only" : "debug_fw");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (is_runtime && !wil->platform_ops.suspend) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ mutex_lock(&wil->vif_mutex);
+ active_ifaces = wil_has_active_ifaces(wil, true, false);
+ mutex_unlock(&wil->vif_mutex);
+
+ if (!active_ifaces) {
+ /* can always sleep when down */
+ wil_dbg_pm(wil, "Interface is down\n");
+ goto out;
+ }
+ if (test_bit(wil_status_resetting, wil->status)) {
+ wil_dbg_pm(wil, "Delay suspend when resetting\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (wil->recovery_state != fw_recovery_idle) {
+ wil_dbg_pm(wil, "Delay suspend during recovery\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* interface is running */
+ mutex_lock(&wil->vif_mutex);
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+
+ if (!vif)
+ continue;
+ if (!wil_can_suspend_vif(wil, vif, is_runtime)) {
+ rc = -EBUSY;
+ mutex_unlock(&wil->vif_mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&wil->vif_mutex);
+
+out:
+ wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
+ is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+ if (rc)
+ wil->suspend_stats.rejected_by_host++;
+
+ return rc;
+}
+
+static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+
+ /* wil_status_resuming will be cleared when getting
+ * WMI_TRAFFIC_RESUME_EVENTID
+ */
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspended, wil->status);
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+
+ wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
+ /* Send WMI resume request to the device */
+ rc = wmi_resume(wil);
+ if (rc) {
+ wil_err(wil, "device failed to resume (%d)\n", rc);
+ if (no_fw_recovery)
+ goto out;
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down failed (%d)\n", rc);
+ goto out;
+ }
+ rc = wil_up(wil);
+ if (rc) {
+ wil_err(wil, "wil_up failed (%d)\n", rc);
+ goto out;
+ }
+ }
+
+ /* Wake all queues */
+ wil_pm_wake_connected_net_queues(wil);
+
+out:
+ if (rc)
+ set_bit(wil_status_suspended, wil->status);
+ return rc;
+}
+
+static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ unsigned long data_comp_to;
+
+ wil_dbg_pm(wil, "suspend keep radio on\n");
+
+ /* Prevent handling of new tx and wmi commands */
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
+
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
+ wil_pm_stop_all_net_queues(wil);
+
+ if (!wil_is_tx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil->txrx_ops.is_rx_idle(wil)) {
+ wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ if (!wil_is_wmi_idle(wil)) {
+ wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
+ wil->suspend_stats.rejected_by_host++;
+ goto reject_suspend;
+ }
+
+ /* Send WMI suspend request to the device */
+ rc = wmi_suspend(wil);
+ if (rc) {
+ wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
+ rc);
+ goto reject_suspend;
+ }
+
+ /* Wait for completion of the pending RX packets */
+ data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil->txrx_ops.is_rx_idle(wil)) {
+ if (time_after(jiffies, data_comp_to)) {
+ if (wil->txrx_ops.is_rx_idle(wil))
+ break;
+ wil_err(wil,
+ "TO waiting for idle RX, suspend failed\n");
+ wil->suspend_stats.r_on.failed_suspends++;
+ goto resume_after_fail;
+ }
+ wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
+ napi_synchronize(&wil->napi_rx);
+ msleep(20);
+ }
+ }
+
+ /* In case of pending WMI events, reject the suspend
+ * and resume the device.
+ * This can happen if the device sent the WMI events before
+ * approving the suspend.
+ */
+ if (!wil_is_wmi_idle(wil)) {
+ wil_err(wil, "suspend failed due to pending WMI events\n");
+ wil->suspend_stats.r_on.failed_suspends++;
+ goto resume_after_fail;
+ }
+
+ wil_mask_irq(wil);
+
+ /* Disable device reset on PERST */
+ wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ if (wil->platform_ops.suspend) {
+ rc = wil->platform_ops.suspend(wil->platform_handle, true);
+ if (rc) {
+ wil_err(wil, "platform device failed to suspend (%d)\n",
+ rc);
+ wil->suspend_stats.r_on.failed_suspends++;
+ wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wil_unmask_irq(wil);
+ goto resume_after_fail;
+ }
+ }
+
+ /* Save the current bus request to return to the same in resume */
+ wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+ wil6210_bus_request(wil, 0);
+
+ set_bit(wil_status_suspended, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+
+ return rc;
+
+resume_after_fail:
+ set_bit(wil_status_resuming, wil->status);
+ clear_bit(wil_status_suspending, wil->status);
+ rc = wmi_resume(wil);
+ /* if resume succeeded, reject the suspend */
+ if (!rc) {
+ rc = -EBUSY;
+ wil_pm_wake_connected_net_queues(wil);
+ }
+ return rc;
+
+reject_suspend:
+ clear_bit(wil_status_suspending, wil->status);
+ wil_pm_wake_connected_net_queues(wil);
+ return -EBUSY;
+}
+
+static int wil_suspend_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ bool active_ifaces;
+
+ wil_dbg_pm(wil, "suspend radio off\n");
+
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
+
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
+ /* if netif up, hardware is alive, shut it down */
+ mutex_lock(&wil->vif_mutex);
+ active_ifaces = wil_has_active_ifaces(wil, true, false);
+ mutex_unlock(&wil->vif_mutex);
+
+ if (active_ifaces) {
+ rc = wil_down(wil);
+ if (rc) {
+ wil_err(wil, "wil_down : %d\n", rc);
+ wil->suspend_stats.r_off.failed_suspends++;
+ goto out;
+ }
+ }
+
+ /* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
+ wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
+ wil_disable_irq(wil);
+
+ if (wil->platform_ops.suspend) {
+ rc = wil->platform_ops.suspend(wil->platform_handle, false);
+ if (rc) {
+ wil_enable_irq(wil);
+ wil->suspend_stats.r_off.failed_suspends++;
+ goto out;
+ }
+ }
+
+ set_bit(wil_status_suspended, wil->status);
+
+out:
+ clear_bit(wil_status_suspending, wil->status);
+ wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
+
+ return rc;
+}
+
+static int wil_resume_radio_off(struct wil6210_priv *wil)
+{
+ int rc = 0;
+ bool active_ifaces;
+
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+ /* If any netif is up, bring the hardware up.
+ * During open(), IFF_UP is set after the actual device method
+ * invocation; this prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset.
+ */
+ mutex_lock(&wil->vif_mutex);
+ active_ifaces = wil_has_active_ifaces(wil, true, false);
+ mutex_unlock(&wil->vif_mutex);
+ if (active_ifaces)
+ rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
+
+ return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
+{
+ int rc = 0;
+
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
+ if (!keep_radio_on)
+ rc = wil_suspend_radio_off(wil);
+ else
+ rc = wil_suspend_keep_radio_on(wil);
+
+ wil_dbg_pm(wil, "suspend: %s => %d\n",
+ is_runtime ? "runtime" : "system", rc);
+
+ return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
+{
+ int rc = 0;
+
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
+
+ if (wil->platform_ops.resume) {
+ rc = wil->platform_ops.resume(wil->platform_handle,
+ keep_radio_on);
+ if (rc) {
+ wil_err(wil, "platform_ops.resume : %d\n", rc);
+ goto out;
+ }
+ }
+
+ if (keep_radio_on)
+ rc = wil_resume_keep_radio_on(wil);
+ else
+ rc = wil_resume_radio_off(wil);
+
+out:
+ wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+ rc);
+ return rc;
+}
+
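+/* Drop the runtime PM usage reference held across probe and let the
+ * device autosuspend after WIL6210_AUTOSUSPEND_DELAY_MS of inactivity.
+ */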
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_forbid(dev);
+ pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+ int rc;
+ struct device *dev = wil_to_dev(wil);
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc < 0) {
+ wil_err(wil, "pm_runtime_resume_and_get() failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
new file mode 100644
index 000000000..a2f7b4c1d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include "wmi.h"
+#include "wil6210.h"
+#include "txrx.h"
+#include "pmc.h"
+
+struct desc_alloc_info {
+ dma_addr_t pa;
+ void *va;
+};
+
+static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
+{
+ return !!pmc->pring_va;
+}
+
+void wil_pmc_init(struct wil6210_priv *wil)
+{
+ memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
+ mutex_init(&wil->pmc.lock);
+}
+
+/* Allocate the physical ring (p-ring) and the required
+ * number of descriptors of the required size.
+ * Initialize the descriptors as required by the PMC DMA.
+ * The dwords of each descriptor's buffer are initialized to hold
+ * the dword's serial number in the LSW and the reserved value
+ * PCM_DATA_INVALID_DW_VAL in the MSW.
+ */
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors,
+ int descriptor_size)
+{
+ u32 i;
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+ int last_cmd_err = -ENOMEM;
+
+ mutex_lock(&pmc->lock);
+
+ if (wil_is_pmc_allocated(pmc)) {
+ /* sanity check */
+ wil_err(wil, "ERROR pmc is already allocated\n");
+ goto no_release_err;
+ }
+ if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
+ wil_err(wil,
+ "Invalid params num_descriptors(%d), descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
+ wil_err(wil,
+ "num_descriptors(%d) exceeds max ring size %d\n",
+ num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > INT_MAX / descriptor_size) {
+ wil_err(wil,
+ "Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ pmc->num_descriptors = num_descriptors;
+ pmc->descriptor_size = descriptor_size;
+
+ wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+ num_descriptors, descriptor_size);
+
+ /* allocate descriptors info list in pmc context*/
+ pmc->descriptors = kcalloc(num_descriptors,
+ sizeof(struct desc_alloc_info),
+ GFP_KERNEL);
+ if (!pmc->descriptors) {
+ wil_err(wil, "ERROR allocating pmc skb list\n");
+ goto no_release_err;
+ }
+
+ wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+ pmc->descriptors);
+
+ /* Allocate the pring buffer and descriptors.
+ * vring->va should be aligned on its size rounded up to a power of 2;
+ * this is guaranteed by dma_alloc_coherent.
+ *
+ * The HW has a limitation that all vring addresses must share the
+ * same upper 16 bits of the 48-bit address. To work around that,
+ * if we are using more than 32-bit addresses, switch to a 32-bit
+ * mask before allocating the vring memory.
+ *
+ * There's no check for the return value of dma_set_mask_and_coherent,
+ * since we assume that if we were able to set the mask during
+ * initialization on this system, it will not fail if we set it again.
+ */
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ pmc->pring_va = dma_alloc_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ &pmc->pring_pa,
+ GFP_KERNEL);
+
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
+
+ wil_dbg_misc(wil,
+ "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
+ pmc->pring_va, &pmc->pring_pa,
+ sizeof(struct vring_tx_desc),
+ num_descriptors,
+ sizeof(struct vring_tx_desc) * num_descriptors);
+
+ if (!pmc->pring_va) {
+ wil_err(wil, "ERROR allocating pmc pring\n");
+ goto release_pmc_skb_list;
+ }
+
+ /* Initially, all descriptors are SW-owned.
+ * For Tx, Rx and PMC the ownership bit is at the same location,
+ * so any of the descriptor layouts can be used here.
+ */
+ for (i = 0; i < num_descriptors; i++) {
+ struct vring_tx_desc *_d = &pmc->pring_va[i];
+ struct vring_tx_desc dd = {}, *d = &dd;
+ int j = 0;
+
+ pmc->descriptors[i].va = dma_alloc_coherent(dev,
+ descriptor_size,
+ &pmc->descriptors[i].pa,
+ GFP_KERNEL);
+
+ if (unlikely(!pmc->descriptors[i].va)) {
+ wil_err(wil, "ERROR allocating pmc descriptor %d", i);
+ goto release_pmc_skbs;
+ }
+
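+ /* pre-fill the buffer so unwritten dwords are recognizable:
+ * dword j reads back as PCM_DATA_INVALID_DW_VAL | j,
+ * e.g. dword 5 becomes 0xB0BA0005
+ */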
+ for (j = 0; j < descriptor_size / sizeof(u32); j++) {
+ u32 *p = (u32 *)pmc->descriptors[i].va + j;
+ *p = PCM_DATA_INVALID_DW_VAL | j;
+ }
+
+ /* configure dma descriptor */
+ d->dma.addr.addr_low =
+ cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
+ d->dma.addr.addr_high =
+ cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
+ d->dma.status = 0; /* 0 = HW_OWNED */
+ d->dma.length = cpu_to_le16(descriptor_size);
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ *_d = *d;
+ }
+
+ wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
+
+ pmc_cmd.op = WMI_PMC_ALLOCATE;
+ pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
+ pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
+
+ wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
+ pmc->last_cmd_status = wmi_send(wil,
+ WMI_PMC_CMDID,
+ vif->mid,
+ &pmc_cmd,
+ sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ pmc->last_cmd_status);
+ goto release_pmc_skbs;
+ }
+
+ mutex_unlock(&pmc->lock);
+
+ return;
+
+release_pmc_skbs:
+ wil_err(wil, "exit on error: Releasing skbs...\n");
+ for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_err(wil, "exit on error: Releasing pring...\n");
+
+ dma_free_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ pmc->pring_va,
+ pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+
+release_pmc_skb_list:
+ wil_err(wil, "exit on error: Releasing descriptors info list...\n");
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+
+no_release_err:
+ pmc->last_cmd_status = last_cmd_err;
+ mutex_unlock(&pmc->lock);
+}
+
+/* Traverse the p-ring and release all buffers.
+ * At the end release the p-ring memory
+ */
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
+{
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+
+ mutex_lock(&pmc->lock);
+
+ pmc->last_cmd_status = 0;
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_dbg_misc(wil,
+ "pmc_free: Error, can't free - not allocated\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return;
+ }
+
+ if (send_pmc_cmd) {
+ wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
+ pmc_cmd.op = WMI_PMC_RELEASE;
+ pmc->last_cmd_status =
+ wmi_send(wil, WMI_PMC_CMDID, vif->mid,
+ &pmc_cmd, sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "WMI_PMC_CMD with RELEASE op failed, status %d",
+ pmc->last_cmd_status);
+ /* There's nothing we can do with this error.
+ * Normally, it should never occur.
+ * Continue to freeing all memory allocated for pmc.
+ */
+ }
+ }
+
+ if (pmc->pring_va) {
+ size_t buf_size = sizeof(struct vring_tx_desc) *
+ pmc->num_descriptors;
+
+ wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+ pmc->pring_va);
+ dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ if (pmc->descriptors) {
+ int i;
+
+ for (i = 0;
+ i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ pmc->descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+ pmc->num_descriptors);
+ wil_dbg_misc(wil,
+ "pmc_free: free pmc descriptors info list %p\n",
+ pmc->descriptors);
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ mutex_unlock(&pmc->lock);
+}
+
+/* Status of the last operation requested via debugfs: alloc/free/read.
+ * Returns 0 on success or a negative errno.
+ */
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
+ wil->pmc.last_cmd_status);
+
+ return wil->pmc.last_cmd_status;
+}
+
+/* Read from the requested position up to the end of the current
+ * descriptor, depending on the descriptor size configured during the
+ * alloc request.
+ */
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ size_t retval = 0;
+ unsigned long long idx;
+ loff_t offset;
+ size_t pmc_size;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "error, pmc is not allocated!\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ wil_dbg_misc(wil,
+ "pmc_read: size %u, pos %lld\n",
+ (u32)count, *f_pos);
+
+ pmc->last_cmd_status = 0;
+
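+ /* translate the file position into a (descriptor index, byte offset)
+ * pair; e.g. with a 4096-byte descriptor_size, *f_pos 10000 maps to
+ * idx 2 and offset 1808
+ */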
+ idx = *f_pos;
+ do_div(idx, pmc->descriptor_size);
+ offset = *f_pos - (idx * pmc->descriptor_size);
+
+ if (*f_pos >= pmc_size) {
+ wil_dbg_misc(wil,
+ "pmc_read: reached end of pmc buf: %lld >= %u\n",
+ *f_pos, (u32)pmc_size);
+ pmc->last_cmd_status = -ERANGE;
+ goto out;
+ }
+
+ wil_dbg_misc(wil,
+ "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ *f_pos, idx, offset, count);
+
+ /* if no errors, return the copied byte count */
+ retval = simple_read_from_buffer(buf,
+ count,
+ &offset,
+ pmc->descriptors[idx].va,
+ pmc->descriptor_size);
+ *f_pos += retval;
+out:
+ mutex_unlock(&pmc->lock);
+
+ return retval;
+}
+
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
+{
+ loff_t newpos;
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ size_t pmc_size;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "error, pmc is not allocated!\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ switch (whence) {
+ case 0: /* SEEK_SET */
+ newpos = off;
+ break;
+
+ case 1: /* SEEK_CUR */
+ newpos = filp->f_pos + off;
+ break;
+
+ case 2: /* SEEK_END */
+ newpos = pmc_size;
+ break;
+
+ default: /* can't happen */
+ newpos = -EINVAL;
+ goto out;
+ }
+
+ if (newpos < 0) {
+ newpos = -EINVAL;
+ goto out;
+ }
+ if (newpos > pmc_size)
+ newpos = pmc_size;
+
+ filp->f_pos = newpos;
+
+out:
+ mutex_unlock(&pmc->lock);
+
+ return newpos;
+}
+
+int wil_pmcring_read(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct pmc_ctx *pmc = &wil->pmc;
+ size_t pmc_ring_size =
+ sizeof(struct vring_rx_desc) * pmc->num_descriptors;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "error, pmc is not allocated!\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);
+
+ seq_write(s, pmc->pring_va, pmc_ring_size);
+
+ mutex_unlock(&pmc->lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
new file mode 100644
index 000000000..b3d79eb50
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (c) 2012-2015 Qualcomm Atheros, Inc. */
+
+#include <linux/types.h>
+
+#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
+
+void wil_pmc_init(struct wil6210_priv *wil);
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors, int descriptor_size);
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
+ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
+int wil_pmcring_read(struct seq_file *s, void *data);
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644
index 000000000..d385bc030
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "wil6210.h"
+#include "txrx.h"
+
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
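+
+/* Sequence numbers are 12 bits wide; all arithmetic below is modulo 4096.
+ * Example: seq_sub(0x001, 0xffe) == 3, so a sequence number just past the
+ * wraparound still compares as "newer" than the head sequence number.
+ */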
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+ return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+ return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+ return (sq1 - sq2) & SEQ_MASK;
+}
+
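+/* map a sequence number to its slot in the circular reorder buffer,
+ * relative to the block-ack starting sequence number (ssn)
+ */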
+static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
+{
+ return seq_sub(seq, r->ssn) % r->buf_size;
+}
+
+static void wil_release_reorder_frame(struct net_device *ndev,
+ struct wil_tid_ampdu_rx *r,
+ int index)
+{
+ struct sk_buff *skb = r->reorder_buf[index];
+
+ if (!skb)
+ goto no_frame;
+
+ /* release the frame from the reorder ring buffer */
+ r->stored_mpdu_num--;
+ r->reorder_buf[index] = NULL;
+ wil_netif_rx_any(skb, ndev);
+
+no_frame:
+ r->head_seq_num = seq_inc(r->head_seq_num);
+}
+
+static void wil_release_reorder_frames(struct net_device *ndev,
+ struct wil_tid_ampdu_rx *r,
+ u16 hseq)
+{
+ int index;
+
+	/* note: this function is never called with
+	 * hseq preceding r->head_seq_num, i.e. it always holds that
+	 * !seq_less(hseq, r->head_seq_num),
+	 * and thus on loop exit
+	 * r->head_seq_num == hseq
+	 */
+ while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
+ index = reorder_index(r, r->head_seq_num);
+ wil_release_reorder_frame(ndev, r, index);
+ }
+ r->head_seq_num = hseq;
+}
+
+static void wil_reorder_release(struct net_device *ndev,
+ struct wil_tid_ampdu_rx *r)
+{
+ int index = reorder_index(r, r->head_seq_num);
+
+ while (r->reorder_buf[index]) {
+ wil_release_reorder_frame(ndev, r, index);
+ index = reorder_index(r, r->head_seq_num);
+ }
+}
+
+/* called in NAPI context */
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wil6210_vif *vif;
+ struct net_device *ndev;
+ int tid, cid, mid, mcast, retry;
+ u16 seq;
+ struct wil_sta_info *sta;
+ struct wil_tid_ampdu_rx *r;
+ u16 hseq;
+ int index;
+
+ wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
+ &mcast, &retry);
+ sta = &wil->sta[cid];
+
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
+ mid, cid, tid, seq, mcast);
+
+ vif = wil->vifs[mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid);
+ dev_kfree_skb(skb);
+ return;
+ }
+ ndev = vif_to_ndev(vif);
+
+ spin_lock(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ if (!r) {
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ if (unlikely(mcast)) {
+ if (retry && seq == r->mcast_last_seq) {
+ r->drop_dup_mcast++;
+ wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
+ seq);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ r->mcast_last_seq = seq;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ r->total++;
+ hseq = r->head_seq_num;
+
+	/* Due to the race between the WMI event reporting BACK
+	 * establishment and data Rx, a few packets may be passed up
+	 * before the reorder buffer gets allocated. Catch up by
+	 * pretending the SSN is what we see in the first Rx packet.
+	 *
+	 * Another scenario: Rx got delayed and we received a packet
+	 * from before the BACK. Pass it to the stack and wait.
+ */
+ if (r->first_time) {
+ r->first_time = false;
+ if (seq != r->head_seq_num) {
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil,
+ "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
+ seq, r->head_seq_num);
+ r->first_time = true;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+ wil_err(wil,
+ "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
+ seq, r->head_seq_num);
+ r->head_seq_num = seq;
+ r->ssn = seq;
+ }
+ }
+
+	/* frame with an out-of-date sequence number */
+ if (seq_less(seq, r->head_seq_num)) {
+ r->ssn_last_drop = seq;
+ r->drop_old++;
+ wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+ seq, r->head_seq_num);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+	 * If the frame's sequence number exceeds our buffering window
+	 * size, release some previous frames to make room for this one.
+ */
+ if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
+ hseq = seq_inc(seq_sub(seq, r->buf_size));
+ /* release stored frames up to new head to stack */
+ wil_release_reorder_frames(ndev, r, hseq);
+ }
+
+ /* Now the new frame is always in the range of the reordering buffer */
+
+ index = reorder_index(r, seq);
+
+ /* check if we already stored this frame */
+ if (r->reorder_buf[index]) {
+ r->drop_dup++;
+ wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the current MPDU is in the right order and nothing else
+ * is stored we can process it directly, no need to buffer it.
+ * If it is first but there's something stored, we may be able
+ * to release frames after this one.
+ */
+ if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
+ r->head_seq_num = seq_inc(r->head_seq_num);
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ /* put the frame in the reordering buffer */
+ r->reorder_buf[index] = skb;
+ r->stored_mpdu_num++;
+ wil_reorder_release(ndev, r);
+
+out:
+ spin_unlock(&sta->tid_rx_lock);
+}
+
+/* process BAR frame, called in NAPI context */
+void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ u8 cid, u8 tid, u16 seq)
+{
+ struct wil_sta_info *sta = &wil->sta[cid];
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wil_tid_ampdu_rx *r;
+
+ spin_lock(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ if (!r) {
+ wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
+ goto out;
+ }
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
+ seq, r->head_seq_num);
+ goto out;
+ }
+ wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
+ cid, vif->mid, tid, seq, r->head_seq_num);
+ wil_release_reorder_frames(ndev, r, seq);
+
+out:
+ spin_unlock(&sta->tid_rx_lock);
+}
+
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn)
+{
+ struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
+
+ if (!r)
+ return NULL;
+
+ r->reorder_buf =
+ kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
+ if (!r->reorder_buf) {
+ kfree(r);
+ return NULL;
+ }
+
+ r->ssn = ssn;
+ r->head_seq_num = ssn;
+ r->buf_size = size;
+ r->stored_mpdu_num = 0;
+ r->first_time = true;
+ r->mcast_last_seq = U16_MAX;
+ return r;
+}
+
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ int i;
+
+ if (!r)
+ return;
+
+	/* Do not pass remaining frames to the network stack - it may not
+	 * be expecting to get any more Rx. Rx from here may lead to a
+	 * kernel OOPS since some per-socket accounting info was already
+	 * released.
+ */
+ for (i = 0; i < r->buf_size; i++)
+ kfree_skb(r->reorder_buf[i]);
+
+ kfree(r->reorder_buf);
+ kfree(r);
+}
+
+/* ADDBA processing */
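+/* Cap the BlockAck window so that a full window of maximum-size MPDUs
+ * still fits within the device's max_ampdu_size; e.g. (hypothetical
+ * numbers) a 64KB A-MPDU limit with ~2KB MPDU buffers would cap the
+ * window at 32 frames.
+ */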
+static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
+{
+ u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
+ (mtu_max + WIL_MAX_MPDU_OVERHEAD));
+
+ if (!req_agg_wsize)
+ return max_agg_size;
+
+ return min(max_agg_size, req_agg_wsize);
+}
+
+/* Block Ack - Rx side (recipient) */
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ u16 param_set = le16_to_cpu(ba_param_set);
+ u16 agg_timeout = le16_to_cpu(ba_timeout);
+ u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
+ struct wil_sta_info *sta;
+ u16 agg_wsize;
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
+ bool agg_amsdu = wil->use_enhanced_dma_hw &&
+ wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en && (param_set & BIT(0));
+ int ba_policy = param_set & BIT(1);
+ u16 ssn = seq_ctrl >> 4;
+ struct wil_tid_ampdu_rx *r;
+ int rc = 0;
+
+ might_sleep();
+
+ /* sanity checks */
+ if (cid >= wil->max_assoc_sta) {
+ wil_err(wil, "BACK: invalid CID %d\n", cid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status != wil_sta_connected) {
+ wil_err(wil, "BACK: CID %d not connected\n", cid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil,
+ "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
+ cid, sta->addr, tid, req_agg_wsize, agg_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
+
+ /* apply policies */
+ if (req_agg_wsize == 0) {
+ wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+ wil->max_agg_wsize);
+ agg_wsize = wil->max_agg_wsize;
+ } else {
+ agg_wsize = min_t(u16, wil->max_agg_wsize, req_agg_wsize);
+ }
+
+ rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
+ WLAN_STATUS_SUCCESS, agg_amsdu,
+ agg_wsize, agg_timeout);
+ if (rc) {
+ wil_err(wil, "do not apply ba, rc(%d)\n", rc);
+ goto out;
+ }
+
+ /* apply */
+ if (!wil->use_rx_hw_reordering) {
+ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+ spin_lock_bh(&sta->tid_rx_lock);
+ wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+ sta->tid_rx[tid] = r;
+ spin_unlock_bh(&sta->tid_rx_lock);
+ }
+
+out:
+ return rc;
+}
+
+/* BACK - Tx side (originator) */
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
+{
+ u8 agg_wsize = wil_agg_size(wil, wsize);
+ u16 agg_timeout = 0;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
+ int rc = 0;
+
+ if (txdata->addba_in_progress) {
+ wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
+ ringid);
+ goto out;
+ }
+ if (txdata->agg_wsize) {
+ wil_dbg_misc(wil,
+ "ADDBA for vring[%d] already done for wsize %d\n",
+ ringid, txdata->agg_wsize);
+ goto out;
+ }
+ txdata->addba_in_progress = true;
+ rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout);
+ if (rc) {
+ wil_err(wil, "wmi_addba failed, rc (%d)", rc);
+ txdata->addba_in_progress = false;
+ }
+
+out:
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
new file mode 100644
index 000000000..6909e989b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ */
+
+#include <linux/module.h>
+
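+/* Defining CREATE_TRACE_POINTS before including trace.h makes this the
+ * single translation unit in which the tracepoint bodies are emitted;
+ * every other user includes trace.h for declarations only.
+ */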
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
new file mode 100644
index 000000000..201f44612
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wil6210
+#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define WIL6210_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
+
+DECLARE_EVENT_CLASS(wil6210_wmi,
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
+
+ TP_ARGS(wmi, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(u8, mid)
+ __field(u16, command_id)
+ __field(u32, fw_timestamp)
+ __field(u16, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->mid = wmi->mid;
+ __entry->command_id = le16_to_cpu(wmi->command_id);
+ __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "MID %d id 0x%04x len %d timestamp %d",
+ __entry->mid, __entry->command_id, __entry->buf_len,
+ __entry->fw_timestamp
+ )
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
+);
+
+#define WIL6210_MSG_MAX (200)
+
+DECLARE_EVENT_CLASS(wil6210_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \
+ {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" })
+
+TRACE_EVENT(wil6210_irq_pseudo,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x : %s", __entry->x,
+ wil_pseudo_irq_cause(__entry->x))
+);
+
+DECLARE_EVENT_CLASS(wil6210_irq,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x", __entry->x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_rx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_tx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+TRACE_EVENT(wil6210_rx,
+ TP_PROTO(u16 index, struct vring_rx_desc *d),
+ TP_ARGS(index, d),
+ TP_STRUCT__entry(
+ __field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->len = d->dma.length;
+ __entry->mid = wil_rxdesc_mid(d);
+ __entry->cid = wil_rxdesc_cid(d);
+ __entry->tid = wil_rxdesc_tid(d);
+ __entry->type = wil_rxdesc_ftype(d);
+ __entry->subtype = wil_rxdesc_subtype(d);
+ __entry->seq = wil_rxdesc_seq(d);
+ __entry->mcs = wil_rxdesc_mcs(d);
+ ),
+ TP_printk("index %d len %d mid %d cid (%%8) %d tid %d mcs %d seq 0x%03x"
+ " type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
+TRACE_EVENT(wil6210_rx_status,
+ TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
+ void *msg),
+ TP_ARGS(wil, use_compressed, buff_id, msg),
+ TP_STRUCT__entry(__field(u8, use_compressed)
+ __field(u16, buff_id)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(__entry->use_compressed = use_compressed;
+ __entry->buff_id = buff_id;
+ __entry->len = wil_rx_status_get_length(msg);
+ __entry->mid = wil_rx_status_get_mid(msg);
+ __entry->cid = wil_rx_status_get_cid(msg);
+ __entry->tid = wil_rx_status_get_tid(msg);
+ __entry->type = wil_rx_status_get_frame_type(wil,
+ msg);
+ __entry->subtype = wil_rx_status_get_fc1(wil, msg);
+ __entry->seq = wil_rx_status_get_seq(wil, msg);
+ __entry->mcs = wil_rx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
+ __entry->use_compressed, __entry->buff_id, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
+TRACE_EVENT(wil6210_tx,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
+ TP_ARGS(vring, index, len, frags),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, frags)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->frags = frags;
+ __entry->index = index;
+ __entry->len = len;
+ ),
+ TP_printk("vring %d index %d len %d frags %d",
+ __entry->vring, __entry->index, __entry->len, __entry->frags)
+);
+
+TRACE_EVENT(wil6210_tx_done,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
+ TP_ARGS(vring, index, len, err),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, err)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->index = index;
+ __entry->len = len;
+ __entry->err = err;
+ ),
+ TP_printk("vring %d index %d len %d err 0x%02x",
+ __entry->vring, __entry->index, __entry->len,
+ __entry->err)
+);
+
+TRACE_EVENT(wil6210_tx_status,
+ TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
+ unsigned int len),
+ TP_ARGS(msg, index, len),
+ TP_STRUCT__entry(__field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, num_descs)
+ __field(u8, ring_id)
+ __field(u8, status)
+ __field(u8, mcs)
+
+ ),
+ TP_fast_assign(__entry->index = index;
+ __entry->len = len;
+ __entry->num_descs = msg->num_descriptors;
+ __entry->ring_id = msg->ring_id;
+ __entry->status = msg->status;
+ __entry->mcs = wil_tx_status_get_mcs(msg);
+ ),
+ TP_printk(
+ "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
+ __entry->ring_id, __entry->index, __entry->len,
+ __entry->num_descs, __entry->status, __entry->mcs)
+);
+
+#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000..f29ac6de7
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,2589 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ipv6.h>
+#include <linux/prefetch.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+#include "trace.h"
+#include "txrx_edma.h"
+
+bool rx_align_2;
+module_param(rx_align_2, bool, 0444);
+MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+
+bool rx_large_buf;
+module_param(rx_large_buf, bool, 0444);
+MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
+
+/* Drop Tx packets in case Tx ring is full */
+bool drop_if_ring_full;
+
+static inline uint wil_rx_snaplen(void)
+{
+ return rx_align_2 ? 6 : 0;
+}
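+
+/* When rx_align_2 is set, received frames carry an extra 6-byte SNAP
+ * field between the addresses and the ethertype (see the packet layout
+ * comment in wil_vring_reap_rx(), where those bytes are stripped);
+ * this helper accounts for them when sizing and parsing Rx buffers.
+ */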
+
+/* wil_ring_wmark_low - low watermark for available descriptor space */
+static inline int wil_ring_wmark_low(struct wil_ring *ring)
+{
+ return ring->size / 8;
+}
+
+/* wil_ring_wmark_high - high watermark for available descriptor space */
+static inline int wil_ring_wmark_high(struct wil_ring *ring)
+{
+ return ring->size / 4;
+}
+
+/* returns true if num avail descriptors is lower than wmark_low */
+static inline int wil_ring_avail_low(struct wil_ring *ring)
+{
+ return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
+}
+
+/* returns true if num avail descriptors is higher than wmark_high */
+static inline int wil_ring_avail_high(struct wil_ring *ring)
+{
+ return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
+}
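+
+/* For example, with ring->size = 1024 the low watermark is 128 and the
+ * high watermark is 256 available descriptors.
+ */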
+
+/* returns true when all tx vrings are empty */
+bool wil_is_tx_idle(struct wil6210_priv *wil)
+{
+ int i;
+ unsigned long data_comp_to;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct wil_ring *vring = &wil->ring_tx[i];
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[vring_index];
+
+ spin_lock(&txdata->lock);
+
+ if (!vring->va || !txdata->enabled) {
+ spin_unlock(&txdata->lock);
+ continue;
+ }
+
+ data_comp_to = jiffies + msecs_to_jiffies(
+ WIL_DATA_COMPLETION_TO_MS);
+ if (test_bit(wil_status_napi_en, wil->status)) {
+ while (!wil_ring_is_empty(vring)) {
+ if (time_after(jiffies, data_comp_to)) {
+ wil_dbg_pm(wil,
+ "TO waiting for idle tx\n");
+ spin_unlock(&txdata->lock);
+ return false;
+ }
+ wil_dbg_ratelimited(wil,
+ "tx vring is not empty -> NAPI\n");
+ spin_unlock(&txdata->lock);
+ napi_synchronize(&wil->napi_tx);
+ msleep(20);
+ spin_lock(&txdata->lock);
+ if (!vring->va || !txdata->enabled)
+ break;
+ }
+ }
+
+ spin_unlock(&txdata->lock);
+ }
+
+ return true;
+}
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ wil_dbg_misc(wil, "vring_alloc:\n");
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
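+	/* each descriptor is 32 bytes, so e.g. a 1024-entry vring takes
+	 * 32 KiB of coherent memory (plus the separate ctx array)
+	 */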
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+
+	/* vring->va should be aligned on its size rounded up to a power
+	 * of 2. This is guaranteed by dma_alloc_coherent.
+	 *
+	 * The HW has a limitation that all vring addresses must share the
+	 * same upper 16 bits of the 48-bit address. To work around that,
+	 * if we are using more than 32-bit addresses, switch to a 32-bit
+	 * mask before allocating the vring memory.
+	 *
+	 * The return value of dma_set_mask_and_coherent is not checked:
+	 * we assume that if we were able to set the mask during
+	 * initialization on this system, it will not fail if we set it
+	 * again here.
+	 */
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
+
+	/* initially, all descriptors are SW owned
+	 * For Tx and Rx, the ownership bit is at the same location, thus
+	 * we can use either layout here
+	 */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *_d =
+ &vring->va[i].tx.legacy;
+
+ _d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
+ vring->va, &vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
+ struct wil_ctx *ctx)
+{
+ struct vring_tx_desc *d = &desc->legacy;
+ dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ lockdep_assert_held(&wil->mutex);
+ if (!vring->is_rx) {
+ int vring_index = vring - wil->ring_tx;
+
+ wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
+ vring_index, vring->size, vring->va,
+ &vring->pa, vring->ctx);
+ } else {
+ wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
+ vring->size, vring->va,
+ &vring->pa, vring->ctx);
+ }
+
+ while (!wil_ring_is_empty(vring)) {
+ dma_addr_t pa;
+ u16 dmalen;
+ struct wil_ctx *ctx;
+
+ if (!vring->is_rx) {
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d =
+ &vring->va[vring->swtail].tx.legacy;
+
+ ctx = &vring->ctx[vring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ vring->swtail);
+ vring->swtail = wil_ring_next_tail(vring);
+ continue;
+ }
+ *d = *_d;
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ vring->swtail = wil_ring_next_tail(vring);
+ } else { /* rx */
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d =
+ &vring->va[vring->swhead].rx.legacy;
+
+ ctx = &vring->ctx[vring->swhead];
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+ kfree_skb(ctx->skb);
+ wil_ring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/* Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
+ dma_addr_t pa;
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+	/* Make sure that the network stack calculates the checksum for
+	 * packets which failed the HW checksum calculation
+	 */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
+ wil_desc_addr_set(&d->dma.addr, pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16(sz);
+ *_d = *d;
+ vring->ctx[i].skb = skb;
+
+ return 0;
+}
+
+/* Add a radiotap header
+ *
+ * Any error is indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ struct wil6210_rtap *rtap;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap = skb_push(skb, rtap_len);
+ memset(rtap, 0, rtap_len);
+
+ rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap->rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
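+	/* fall back to 58320 MHz (60 GHz channel 1) when no monitor
+	 * channel has been set
+	 */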
+ rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap->chnl_flags = cpu_to_le16(0);
+
+ rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap->mcs_flags = 0;
+ rtap->mcs_index = wil_rxdesc_mcs(d);
+}
+
+static bool wil_is_rx_idle(struct wil6210_priv *wil)
+{
+ struct vring_rx_desc *_d;
+ struct wil_ring *ring = &wil->ring_rx;
+
+ _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
+ if (_d->dma.status & RX_DMA_STATUS_DU)
+ return false;
+
+ return true;
+}
+
+static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int mid = wil_rxdesc_mid(d);
+ struct wil6210_vif *vif = wil->vifs[mid];
+	/* cid from the DMA descriptor is limited to 3 bits.
+	 * In case of cid >= 8, the value would be cid modulo 8 and we need
+	 * to find the real cid by locating the transmitter (ta) in the sta
+	 * array
+ */
+ int cid = wil_rxdesc_cid(d);
+ unsigned int snaplen = wil_rx_snaplen();
+ struct ieee80211_hdr_3addr *hdr;
+ int i;
+ unsigned char *ta;
+ u8 ftype;
+
+ /* in monitor mode there are no connections */
+ if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
+ return cid;
+
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (likely(ftype == IEEE80211_FTYPE_DATA)) {
+ if (unlikely(skb->len < ETH_HLEN + snaplen)) {
+ wil_err_ratelimited(wil,
+ "Short data frame, len = %d\n",
+ skb->len);
+ return -ENOENT;
+ }
+ ta = wil_skb_get_sa(skb);
+ } else {
+ if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
+ wil_err_ratelimited(wil, "Short frame, len = %d\n",
+ skb->len);
+ return -ENOENT;
+ }
+ hdr = (void *)skb->data;
+ ta = hdr->addr2;
+ }
+
+ if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
+ return cid;
+
+ /* assuming no concurrency between AP interfaces and STA interfaces.
+ * multista is used only in P2P_GO or AP mode. In other modes return
+ * cid from the rx descriptor
+ */
+ if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
+ vif->wdev.iftype != NL80211_IFTYPE_AP)
+ return cid;
+
+	/* For Rx packets the cid from the rx descriptor is limited to
+	 * 3 bits (0..7); to find the real cid, compare the transmitter
+	 * address with the stations' MAC addresses stored in the driver's
+	 * sta array
+	 */
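+	/* e.g. if the descriptor reports cid 3, the candidates probed below
+	 * are 3, 11, 19, ... up to max_assoc_sta
+	 */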
+ for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
+ if (wil->sta[i].status != wil_sta_unused &&
+ ether_addr_equal(wil->sta[i].addr, ta)) {
+ cid = i;
+ break;
+ }
+ }
+ if (i >= wil->max_assoc_sta) {
+ wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
+ ta, vif->wdev.iftype, ftype, skb->len);
+ cid = -ENOENT;
+ }
+
+ return cid;
+}
+
+/* reap 1 frame from @swhead
+ *
+ * Rx descriptor copied to skb->cb
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct wil_ring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil6210_vif *vif;
+ struct net_device *ndev;
+ volatile struct vring_rx_desc *_d;
+ struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int snaplen = wil_rx_snaplen();
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
+ u16 dmalen;
+ u8 ftype;
+ int cid, mid;
+ int i;
+ struct wil_net_stats *stats;
+
+ BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
+
+again:
+ if (unlikely(wil_ring_is_empty(vring)))
+ return NULL;
+
+ i = (int)vring->swhead;
+ _d = &vring->va[i].rx.legacy;
+ if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
+		/* it is not an error, we just reached the end of the Rx done area */
+ return NULL;
+ }
+
+ skb = vring->ctx[i].skb;
+ vring->ctx[i].skb = NULL;
+ wil_ring_advance_head(vring, 1);
+ if (!skb) {
+ wil_err(wil, "No Rx skb at [%d]\n", i);
+ goto again;
+ }
+ d = wil_skb_rxdesc(skb);
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(d->dma.length);
+
+ trace_wil6210_rx(i, d);
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
+ wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ mid = wil_rxdesc_mid(d);
+ vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
+ mid);
+ kfree_skb(skb);
+ goto again;
+ }
+ ndev = vif_to_ndev(vif);
+ if (unlikely(dmalen > sz)) {
+ wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
+ dmalen);
+ kfree_skb(skb);
+ goto again;
+ }
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ cid = wil_rx_get_cid_by_skb(wil, skb);
+ if (cid == -ENOENT) {
+ kfree_skb(skb);
+ goto again;
+ }
+ wil_skb_set_cid(skb, (u8)cid);
+ stats = &wil->sta[cid].stats;
+
+ stats->last_mcs_rx = wil_rxdesc_mcs(d);
+ if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+ stats->rx_per_mcs[stats->last_mcs_rx]++;
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+	/* Non-data frames may be delivered through the Rx DMA channel
+	 * (e.g. BAR). The driver should recognize them by the frame type
+	 * found in the Rx descriptor. If the type is not data, it is an
+	 * 802.11 frame passed up as-is
+	 */
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
+ u8 fc1 = wil_rxdesc_fc1(d);
+ int tid = wil_rxdesc_tid(d);
+ u16 seq = wil_rxdesc_seq(d);
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, vif, cid, tid, seq);
+ } else {
+			/* print all the info again. One can enable only this,
+			 * without the overhead of printing every Rx frame
+			 */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
+ kfree_skb(skb);
+ goto again;
+ }
+
+	/* L4 IDENT is on when the HW calculated the checksum; check the
+	 * status and in case of error leave checksum validation to the
+	 * network stack - higher layers will handle retransmission
+	 * (if required)
+	 */
+ if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
+ /* L4 protocol identified, csum calculated */
+ if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* If the HW reports a bad checksum, let the IP stack
+		 * re-check it. For example, the HW doesn't understand the
+		 * Microsoft IP stack, which mis-calculates the TCP checksum:
+		 * if it should be 0x0, it writes 0xffff in violation of
+		 * RFC 1624
+		 */
+ else
+ stats->rx_csum_err++;
+ }
+
+ if (snaplen) {
+ /* Packet layout
+ * +-------+-------+---------+------------+------+
+ * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
+ * +-------+-------+---------+------------+------+
+ * Need to remove SNAP, shifting SA and DA forward
+ */
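+		/* memmove copies the 12 address bytes forward over the SNAP
+		 * field; the subsequent skb_pull() drops the now-stale
+		 * leading bytes, leaving the addresses adjacent to ETHTYPE
+		 */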
+ memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, snaplen);
+ }
+
+ return skb;
+}
+
+/* Allocate and fill up to @count buffers in the rx ring;
+ * buffers are posted at @swtail.
+ * Note: we have a single RX queue servicing all VIFs, but we allocate
+ * skbs with headroom according to the main interface only. This means
+ * it will not work with a monitor interface together with other VIFs.
+ * Currently we only support a monitor interface on its own, without
+ * other VIFs, and we will need to fix this code once we add such support.
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil_ring *v = &wil->ring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
+ for (; next_tail = wil_ring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (unlikely(rc)) {
+ wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, v->hwtail, v->swtail);
+
+ return rc;
+}
+
+/**
+ * reverse_memcmp - Compare two areas of memory, in reverse order
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ *
+ * Cut'n'paste from original memcmp (see lib/string.c)
+ * with minimal modifications
+ */
+int reverse_memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
+ --su1, --su2, count--) {
+ res = *su1 - *su2;
+ if (res)
+ break;
+ }
+ return res;
+}
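+
+/* The reverse comparison is used for the GCMP PN check below: the PN
+ * bytes appear to be stored least-significant byte first, so ordering
+ * has to be decided starting from the last (most significant) byte.
+ */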
+
+static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_skb_get_cid(skb);
+ int tid = wil_rxdesc_tid(d);
+ int key_id = wil_rxdesc_key_id(d);
+ int mc = wil_rxdesc_mcast(d);
+ struct wil_sta_info *s = &wil->sta[cid];
+ struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+ &s->tid_crypto_rx[tid];
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+ const u8 *pn = (u8 *)&d->mac.pn;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
+static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
+ (d->dma.error & RX_DMA_ERROR_MIC)) {
+ stats->rx_mic_error++;
+ wil_dbg_txrx(wil, "MIC error, dropping packet\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *cid = wil_skb_get_cid(skb);
+ *security = wil_rxdesc_security(d);
+}
+
+/*
+ * Check if the skb is a PTK EAPOL key message
+ *
+ * Returns a pointer to the start of the EAPOL key structure, or NULL
+ * if the frame is not a PTK EAPOL key
+ */
+static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ u8 *buf;
+ const struct wil_1x_hdr *hdr;
+ struct wil_eapol_key *key;
+ u16 key_info;
+ int len = skb->len;
+
+ if (!skb_mac_header_was_set(skb)) {
+ wil_err(wil, "mac header was not set\n");
+ return NULL;
+ }
+
+ len -= skb_mac_offset(skb);
+
+ if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
+ sizeof(struct wil_eapol_key))
+ return NULL;
+
+ buf = skb_mac_header(skb) + sizeof(struct ethhdr);
+
+ hdr = (const struct wil_1x_hdr *)buf;
+ if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
+ return NULL;
+
+ key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
+ if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
+ key->type != WIL_EAPOL_KEY_TYPE_RSN)
+ return NULL;
+
+ key_info = be16_to_cpu(key->key_info);
+ if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
+ return NULL;
+
+ return key;
+}
+
+static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wil_eapol_key *key;
+ u16 key_info;
+
+ key = wil_is_ptk_eapol_key(wil, skb);
+ if (!key)
+ return false;
+
+ key_info = be16_to_cpu(key->key_info);
+ if (key_info & (WIL_KEY_INFO_MIC |
+ WIL_KEY_INFO_ENCR_KEY_DATA)) {
+ /* 3/4 of 4-Way Handshake */
+ wil_dbg_misc(wil, "EAPOL key message 3\n");
+ return true;
+ }
+ /* 1/4 of 4-Way Handshake */
+ wil_dbg_misc(wil, "EAPOL key message 1\n");
+
+ return false;
+}
+
+static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wil_eapol_key *key;
+ u32 *nonce, i;
+
+ key = wil_is_ptk_eapol_key(wil, skb);
+ if (!key)
+ return false;
+
+ nonce = (u32 *)key->key_nonce;
+ for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
+ if (*nonce != 0) {
+ /* message 2/4 */
+ wil_dbg_misc(wil, "EAPOL key message 2\n");
+ return false;
+ }
+ }
+ wil_dbg_misc(wil, "EAPOL key message 4\n");
+
+ return true;
+}
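+
+/* Note on the two helpers above: messages 1 and 3 of the handshake
+ * (received from the AP) are told apart by the MIC/Encrypted-Key-Data
+ * bits, while messages 2 and 4 (transmitted by the station) are told
+ * apart by the key nonce, which the driver expects to be zero only in
+ * message 4.
+ */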
+
+void wil_enable_tx_key_worker(struct work_struct *work)
+{
+ struct wil6210_vif *vif = container_of(work,
+ struct wil6210_vif, enable_tx_key_worker);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc, cid;
+
+ rtnl_lock();
+ if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
+ wil_dbg_misc(wil, "Invalid rekey state = %d\n",
+ vif->ptk_rekey_state);
+ rtnl_unlock();
+ return;
+ }
+
+ cid = wil_find_cid_by_idx(wil, vif->mid, 0);
+ if (!wil_cid_valid(wil, cid)) {
+ wil_err(wil, "Invalid cid = %d\n", cid);
+ rtnl_unlock();
+ return;
+ }
+
+ wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
+ rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
+ WMI_KEY_USE_APPLY_PTK);
+
+ vif->ptk_rekey_state = WIL_REKEY_IDLE;
+ rtnl_unlock();
+
+ if (rc)
+ wil_err(wil, "Apply PTK key failed %d\n", rc);
+}
+
+void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ bool q = false;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION ||
+ !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
+ return;
+
+	/* check if the skb is EAPOL key message 4/4 */
+ if (!wil_skb_is_eap_4(wil, skb))
+ return;
+
+ spin_lock_bh(&wil->eap_lock);
+ switch (vif->ptk_rekey_state) {
+ case WIL_REKEY_IDLE:
+ /* ignore idle state, can happen due to M4 retransmission */
+ break;
+ case WIL_REKEY_M3_RECEIVED:
+ vif->ptk_rekey_state = WIL_REKEY_IDLE;
+ break;
+ case WIL_REKEY_WAIT_M4_SENT:
+ q = true;
+ break;
+ default:
+ wil_err(wil, "Unknown rekey state = %d",
+ vif->ptk_rekey_state);
+ }
+ spin_unlock_bh(&wil->eap_lock);
+
+ if (q) {
+ q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
+ wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
+ q);
+ }
+}
+
+static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION ||
+ !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
+ return;
+
+	/* check if the skb is an EAPOL key message 3/4 */
+ if (!wil_skb_is_eap_3(wil, skb))
+ return;
+
+ if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
+ vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ * Called in softirq context (NAPI poll).
+ */
+void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
+ struct wil_net_stats *stats, bool gro)
+{
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ unsigned int len = skb->len;
+ u8 *sa, *da = wil_skb_get_da(skb);
+	/* here we look for the DA, not A1, thus the Rx descriptor's 'mcast'
+	 * indication is not suitable; we need to look at the data
+	 */
+ int mcast = is_multicast_ether_addr(da);
+ struct sk_buff *xmit_skb = NULL;
+
+ if (wdev->iftype == NL80211_IFTYPE_STATION) {
+ sa = wil_skb_get_sa(skb);
+ if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
+ /* mcast packet looped back to us */
+ dev_kfree_skb(skb);
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ return;
+ }
+ } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
+ if (mcast) {
+ /* send multicast frames both to higher layers in
+ * local net stack and back to the wireless medium
+ */
+ xmit_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ int xmit_cid = wil_find_cid(wil, vif->mid, da);
+
+ if (xmit_cid >= 0) {
+ /* The destination station is associated to
+ * this AP (in this VLAN), so send the frame
+ * directly to it and do not pass it to local
+ * net stack.
+ */
+ xmit_skb = skb;
+ skb = NULL;
+ }
+ }
+ }
+ if (xmit_skb) {
+ /* Send to wireless media and increase priority by 256 to
+ * keep the received priority instead of reclassifying
+ * the frame (see cfg80211_classify8021d).
+ */
+ xmit_skb->dev = ndev;
+ xmit_skb->priority += 256;
+ xmit_skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(xmit_skb);
+ skb_reset_mac_header(xmit_skb);
+ wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
+ dev_queue_xmit(xmit_skb);
+ }
+
+ if (skb) { /* deliver to local stack */
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->dev = ndev;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ wil_rx_handle_eapol(vif, skb);
+
+ if (gro)
+ napi_gro_receive(&wil->napi_rx, skb);
+ else
+ netif_rx(skb);
+ }
+ ndev->stats.rx_packets++;
+ stats->rx_packets++;
+ ndev->stats.rx_bytes += len;
+ stats->rx_bytes += len;
+ if (mcast)
+ ndev->stats.multicast++;
+}
+
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ int cid, security;
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct wil_net_stats *stats;
+
+ wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+
+ stats = &wil->sta[cid].stats;
+
+ skb_orphan(skb);
+
+ if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
+ dev_kfree_skb(skb);
+ ndev->stats.rx_dropped++;
+ stats->rx_replay++;
+ stats->rx_dropped++;
+ return;
+ }
+
+ /* check errors reported by HW and update statistics */
+ if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ wil_netif_rx(skb, ndev, cid, stats, true);
+}
+
+/* Proceed all completed skb's from Rx VRING
+ *
+ * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
+ */
+void wil_rx_handle(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wil_ring *v = &wil->ring_rx;
+ struct sk_buff *skb;
+
+ if (unlikely(!v->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "rx_handle\n");
+ while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
+ (*quota)--;
+
+ /* monitor is currently supported on main interface only */
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+static void wil_rx_buf_len_init(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+ if (mtu_max > wil->rx_buf_len) {
+ /* do not allow RX buffers to be smaller than mtu_max, for
+ * backward compatibility (mtu_max parameter was also used
+ * to support receiving large packets)
+ */
+ wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
+ wil->rx_buf_len = mtu_max;
+ }
+}
+
+static int wil_rx_init(struct wil6210_priv *wil, uint order)
+{
+ struct wil_ring *vring = &wil->ring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "rx_init\n");
+
+ if (vring->va) {
+ wil_err(wil, "Rx ring already allocated\n");
+ return -EINVAL;
+ }
+
+ wil_rx_buf_len_init(wil);
+
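+	/* the ring holds 2^order descriptors, e.g. order 10 gives 1024 */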
+ vring->size = 1 << order;
+ vring->is_rx = true;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ rc = wmi_rx_chain_add(wil, vring);
+ if (rc)
+ goto err_free;
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring);
+
+ return rc;
+}
+
+static void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct wil_ring *vring = &wil->ring_rx;
+
+ wil_dbg_misc(wil, "rx_fini\n");
+
+ if (vring->va)
+ wil_vring_free(wil, vring);
+}
+
+static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int vring_index)
+{
+ struct vring_tx_desc *d = &desc->legacy;
+
+ wil_desc_addr_set(&d->dma.addr, pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+void wil_tx_data_init(struct wil_ring_tx_data *txdata)
+{
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ txdata->idle = 0;
+ txdata->last_idle = 0;
+ txdata->begin = 0;
+ txdata->agg_wsize = 0;
+ txdata->agg_timeout = 0;
+ txdata->agg_amsdu = 0;
+ txdata->addba_in_progress = false;
+ txdata->mid = U8_MAX;
+ spin_unlock_bh(&txdata->lock);
+}
+
+static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
+ int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = cpu_to_le16(size),
+ },
+ .ringid = id,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 0,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply = {
+ .cmd = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.vring_cfg.cid = cid;
+ cmd.vring_cfg.tid = tid;
+ } else {
+ cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
+ }
+
+ wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
+ cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+ lockdep_assert_held(&wil->mutex);
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_tx_data_init(txdata);
+ vring->is_rx = false;
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[id][0] = cid;
+ wil->ring2cid_tid[id][1] = tid;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ if (txdata->dot1x_open && (agg_wsize >= 0))
+ wil_addba_tx_request(wil, id, agg_wsize);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil_vring_free(wil, vring);
+ wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
+ wil->ring2cid_tid[id][1] = 0;
+
+ out:
+
+ return rc;
+}
+
+static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = 0,
+ },
+ .ringid = ring_id,
+ .cidxtid = mk_cidxtid(cid, tid),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 0,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply = {
+ .cmd = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring *vring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
+ cid, tid);
+ lockdep_assert_held(&wil->mutex);
+
+ if (!vring->va) {
+ wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
+ return -EINVAL;
+ }
+
+ if (wil->ring2cid_tid[ring_id][0] != cid ||
+ wil->ring2cid_tid[ring_id][1] != tid) {
+ wil_err(wil, "ring info does not match cid=%u tid=%u\n",
+ wil->ring2cid_tid[ring_id][0],
+ wil->ring2cid_tid[ring_id][1]);
+ }
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ goto fail;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx modify failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* set BA aggregation window size to 0 to force a new BA with the
+ * new AP
+ */
+ txdata->agg_wsize = 0;
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+fail:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
+ wil->ring2cid_tid[ring_id][1] = 0;
+ return rc;
+}
+
+int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_bcast_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = cpu_to_le16(size),
+ },
+ .ringid = id,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ },
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply = {
+ .cmd = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring *vring = &wil->ring_tx[id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+
+ wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
+ cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+ lockdep_assert_held(&wil->mutex);
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_tx_data_init(txdata);
+ vring->is_rx = false;
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
+ wil->ring2cid_tid[id][1] = 0; /* TID */
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+ rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
+ wil_vring_free(wil, vring);
+ out:
+
+ return rc;
+}
+
+static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
+{
+ int i, cid;
+ const u8 *da = wil_skb_get_da(skb);
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ cid = wil_find_cid(wil, vif->mid, da);
+
+ if (cid < 0 || cid >= wil->max_assoc_sta)
+ return NULL;
+
+ /* TODO: fix for multiple TID */
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
+ continue;
+ if (wil->ring2cid_tid[i][0] == cid) {
+ struct wil_ring *v = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
+
+ wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
+ da, i);
+ if (v->va && txdata->enabled) {
+ return v;
+ } else {
+ wil_dbg_txrx(wil,
+ "find_tx_ucast: vring[%d] not valid\n",
+ i);
+ return NULL;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
+
+static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wil_ring *ring;
+ int i;
+ u8 cid;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+	/* In STA mode, we expect to have only 1 VRING,
+	 * for the AP we are connected to.
+	 * Find the first vring eligible for this skb and use it.
+	 */
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
+ continue;
+
+ cid = wil->ring2cid_tid[i][0];
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
+ continue;
+
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
+ continue;
+
+ wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
+
+ return ring;
+ }
+
+ wil_dbg_txrx(wil, "Tx while no rings active?\n");
+
+ return NULL;
+}
+
+/* Use one of 2 strategies:
+ *
+ * 1. New (real broadcast):
+ *    use the dedicated broadcast vring
+ * 2. Old (pseudo-DMS):
+ *    find the first eligible vring and return it;
+ *    duplicate the skb and send it to the other active vrings;
+ *    in all cases override the destination address with the unicast
+ *    peer's address
+ * Use the old strategy when the new one is not yet supported:
+ *    - for PBSS
+ */
+static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wil_ring *v;
+ struct wil_ring_tx_data *txdata;
+ int i = vif->bcast_ring;
+
+ if (i < 0)
+ return NULL;
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!v->va || !txdata->enabled)
+ return NULL;
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
+ return NULL;
+
+ return v;
+}
+
+/* apply multicast to unicast only for ARP and IP packets
+ * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
+ */
+static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+ __be16 ethertype;
+
+ if (!wil->multicast_to_unicast)
+ return false;
+
+ /* multicast to unicast conversion only for some payload */
+ ethertype = eth->h_proto;
+ if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+ ethertype = ethvlan->h_vlan_encapsulated_proto;
+ switch (ethertype) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb, int vring_index)
+{
+ u8 *da = wil_skb_get_da(skb);
+ int cid = wil->ring2cid_tid[vring_index][0];
+
+ ether_addr_copy(da, wil->sta[cid].addr);
+}
+
+static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wil_ring *v, *v2;
+ struct sk_buff *skb2;
+ int i;
+ u8 cid;
+ const u8 *src = wil_skb_get_sa(skb);
+ struct wil_ring_tx_data *txdata, *txdata2;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ /* find 1-st vring eligible for data */
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
+ continue;
+
+ cid = wil->ring2cid_tid[i][0];
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
+ continue;
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
+ continue;
+
+ /* don't Tx back to source when re-routing Rx->Tx at the AP */
+		if (memcmp(wil->sta[cid].addr, src, ETH_ALEN) == 0)
+ continue;
+
+ goto found;
+ }
+
+ wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+
+found:
+ wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb, i);
+
+ /* find other active vrings and duplicate skb for each */
+ for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
+ v2 = &wil->ring_tx[i];
+ txdata2 = &wil->ring_tx_data[i];
+ if (!v2->va || txdata2->mid != vif->mid)
+ continue;
+ cid = wil->ring2cid_tid[i][0];
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
+ continue;
+ if (!wil->ring_tx_data[i].dot1x_open &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE))
+ continue;
+
+		if (memcmp(wil->sta[cid].addr, src, ETH_ALEN) == 0)
+ continue;
+
+ skb2 = skb_copy(skb, GFP_ATOMIC);
+ if (skb2) {
+ wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb2, i);
+ wil_tx_ring(wil, vif, v2, skb2);
+ /* successful call to wil_tx_ring takes skb2 ref */
+ dev_kfree_skb_any(skb2);
+ } else {
+ wil_err(wil, "skb_copy failed\n");
+ }
+ }
+
+ return v;
+}
+
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+ d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
+
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+ struct sk_buff *skb,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len)
+{
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+ d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+ /* Setup TSO: bit and desc type */
+ d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+ (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+ d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+ d->dma.ip_length = skb_net_hdr_len;
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/* Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * Returns 0 on success (including when no checksum offload is needed),
+ * or -EINVAL if the protocol is not supported for checksum offload.
+ *
+ * It is very similar to wil_tx_desc_offload_setup_tso above. This
+ * is "if unrolling" to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+				     struct sk_buff *skb)
+{
+ int protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ protocol = ip_hdr(skb)->protocol;
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |=
+ (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ case IPPROTO_UDP:
+ /* L4 header len: UDP header length */
+ d->dma.d0 |=
+ (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ d->dma.ip_length = skb_network_header_len(skb);
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+
+ return 0;
+}
+
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+ BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+ d->dma.d0 |= wil_tso_type_lst <<
+ DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *vring, struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ /* point to descriptors in shared memory */
+ volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+ *_first_desc = NULL;
+
+ /* pointers to shadow descriptors */
+ struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+ *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+ *first_desc = &first_desc_mem;
+
+ /* pointer to shadow descriptors' context */
+ struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+ int descs_used = 0; /* total number of used descriptors */
+	int sg_desc_cnt = 0; /* number of descriptors for current mss */
+
+ u32 swhead = vring->swhead;
+ int used, avail = wil_ring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 1;
+ int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
+ int f, len, hdrlen, headlen;
+ int vring_index = vring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
+ uint i = swhead;
+ dma_addr_t pa;
+ const skb_frag_t *frag = NULL;
+ int rem_data = mss;
+ int lenmss;
+ int hdr_compensation_need = true;
+ int desc_tso_type = wil_tso_type_first;
+ bool is_ipv4;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int gso_type;
+ int rc = -EINVAL;
+
+ wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
+ vring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+	/* A typical 4K page holds 3-4 payloads; we assume each fragment
+	 * needs one full descriptor, which is how min_desc_required has
+	 * been calculated. In reality we might need more or fewer
+	 * descriptors; this is only the initial check.
+ */
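+	/* e.g. an skb with 3 page fragments needs at least nr_frags + 1 = 4
+	 * descriptors up front; the loop below may use more when a fragment
+	 * spans several mss-sized segments
+	 */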
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ vring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+	/* Header Length = MAC header len + IP header len + TCP header len */
+ hdrlen = skb_tcp_all_headers(skb);
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ /* TCP v4, zero out the IP length and IPv4 checksum fields
+ * as required by the offloading doc
+ */
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ /* TCP v6, zero out the payload length */
+ ipv6_hdr(skb)->payload_len = 0;
+ is_ipv4 = false;
+ break;
+ default:
+ /* other than TCPv4 or TCPv6 types are not supported for TSO.
+ * It is also illegal for both to be set simultaneously
+ */
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* tcp header length and skb network header length are fixed for all
+	 * of the packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ _hdr_desc = &vring->va[i].tx.legacy;
+
+ pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb head DMA map error\n");
+ goto err_exit;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
+ hdrlen, vring_index);
+ wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len);
+ wil_tx_last_desc(hdr_desc);
+
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ hdr_ctx = &vring->ctx[i];
+
+ descs_used++;
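+	/* linear (non-paged) data remaining after the headers mapped above */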
+ headlen = skb_headlen(skb) - hdrlen;
+
+ for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+ if (headlen) {
+ len = headlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+ len);
+ } else {
+ frag = &skb_shinfo(skb)->frags[f];
+ len = skb_frag_size(frag);
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+ }
+
+ while (len) {
+ wil_dbg_txrx(wil,
+ "TSO: len %d, rem_data %d, descs_used %d\n",
+ len, rem_data, descs_used);
+
+ if (descs_used == avail) {
+ wil_err_ratelimited(wil, "TSO: ring overflow\n");
+ rc = -ENOMEM;
+ goto mem_error;
+ }
+
+ lenmss = min_t(int, rem_data, len);
+ i = (swhead + descs_used) % vring->size;
+ wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+ if (!headlen) {
+ pa = skb_frag_dma_map(dev, frag,
+ skb_frag_size(frag) - len,
+ lenmss, DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
+ } else {
+ pa = dma_map_single(dev,
+ skb->data +
+ skb_headlen(skb) - headlen,
+ lenmss,
+ DMA_TO_DEVICE);
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ headlen -= lenmss;
+ }
+
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: DMA map page error\n");
+ goto mem_error;
+ }
+
+ _desc = &vring->va[i].tx.legacy;
+
+ if (!_first_desc) {
+ _first_desc = _desc;
+ first_ctx = &vring->ctx[i];
+ d = first_desc;
+ } else {
+ d = &desc_mem;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, lenmss, vring_index);
+ wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+ is_ipv4, tcp_hdr_len,
+ skb_net_hdr_len);
+
+ /* use tso_type_first only once */
+ desc_tso_type = wil_tso_type_mid;
+
+ descs_used++; /* desc used so far */
+ sg_desc_cnt++; /* desc used for this segment */
+ len -= lenmss;
+ rem_data -= lenmss;
+
+ wil_dbg_txrx(wil,
+ "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+ len, rem_data, descs_used, sg_desc_cnt);
+
+			/* Close the segment if reached mss size or last frag */
+ if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+ if (hdr_compensation_need) {
+					/* the first segment includes hdr desc
+					 * for
+ * release
+ */
+ hdr_ctx->nr_frags = sg_desc_cnt;
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt +
+ 1);
+ hdr_compensation_need = false;
+ } else {
+ wil_tx_desc_set_nr_frags(first_desc,
+ sg_desc_cnt);
+ }
+ first_ctx->nr_frags = sg_desc_cnt - 1;
+
+ wil_tx_last_desc(d);
+
+ /* first descriptor may also be the last
+ * for this mss - make sure not to copy
+ * it twice
+ */
+ if (first_desc != d)
+ *_first_desc = *first_desc;
+
+				/* the last descriptor will be copied at the
+				 * end of this TSO processing
+ */
+ if (f < nr_frags - 1 || len > 0)
+ *_desc = *d;
+
+ rem_data = mss;
+ _first_desc = NULL;
+ sg_desc_cnt = 0;
+ } else if (first_desc != d) /* update mid descriptor */
+ *_desc = *d;
+ }
+ }
+
+ if (!_desc)
+ goto mem_error;
+
+ /* first descriptor may also be the last.
+ * in this case d pointer is invalid
+ */
+ if (_first_desc == _desc)
+ d = first_desc;
+
+ /* Last data descriptor */
+ wil_set_tx_desc_last_tso(d);
+ *_desc = *d;
+
+ /* Fill the total number of descriptors in first desc (hdr)*/
+ wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+ *_hdr_desc = *hdr_desc;
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ vring_index, used, used + descs_used);
+ }
+
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from previous run and will handle the
+ * skb before it was completed.
+ */
+ wmb();
+
+ /* advance swhead */
+ wil_ring_advance_head(vring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, vring->hwtail, vring->swhead);
+ return 0;
+
+mem_error:
+ while (descs_used > 0) {
+ struct wil_ctx *ctx;
+
+ i = (swhead + descs_used - 1) % vring->size;
+ d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
+ _desc = &vring->va[i].tx.legacy;
+ *d = *_desc;
+ _desc->dma.status = TX_DMA_STATUS_DU;
+ ctx = &vring->ctx[i];
+ wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+err_exit:
+ return rc;
+}
+
+static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d;
+ u32 swhead = ring->swhead;
+ int avail = wil_ring_avail_tx(ring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f = 0;
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ uint i = swhead;
+ dma_addr_t pa;
+ int used;
+ bool mcast = (ring_index == vif->bcast_ring);
+ uint len = skb_headlen(skb);
+
+ wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
+ skb->len, ring_index, nr_frags);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < 1 + nr_frags)) {
+ wil_err_ratelimited(wil,
+ "Tx ring[%2d] full. No space for %d fragments\n",
+ ring_index, 1 + nr_frags);
+ return -ENOMEM;
+ }
+ _d = &ring->va[i].tx.legacy;
+
+ pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
+ skb_headlen(skb), skb->data, &pa);
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
+ /* 1-st segment */
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
+ ring_index);
+ if (unlikely(mcast)) {
+ d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
+ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
+ d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
+ }
+ /* Process TCP/UDP checksum offloading */
+ if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
+ wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
+ ring_index);
+ goto dma_error;
+ }
+
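+	/* the first descriptor carries the total descriptor count:
+	 * skb head + all fragments
+	 */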
+ ring->ctx[i].nr_frags = nr_frags;
+ wil_tx_desc_set_nr_frags(d, nr_frags + 1);
+
+ /* middle segments */
+ for (; f < nr_frags; f++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+
+ *_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+ i = (swhead + f + 1) % ring->size;
+ _d = &ring->va[i].tx.legacy;
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "Tx[%2d] failed to map fragment\n",
+ ring_index);
+ goto dma_error;
+ }
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
+ pa, len, ring_index);
+ /* no need to check return code -
+ * if it succeeded for 1-st descriptor,
+ * it will succeed here too
+ */
+ wil_tx_desc_offload_setup(d, skb);
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ *_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ ring->ctx[i].skb = skb_get(skb);
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + nr_frags + 1)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ ring_index, used, used + nr_frags + 1);
+ }
+
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from previous run and will handle the
+ * skb before it was completed.
+ */
+ wmb();
+
+ /* advance swhead */
+ wil_ring_advance_head(ring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
+ ring->swhead);
+ trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ nr_frags = f + 1; /* frags mapped + one for skb head */
+ for (f = 0; f < nr_frags; f++) {
+ struct wil_ctx *ctx;
+
+ i = (swhead + f) % ring->size;
+ ctx = &ring->ctx[i];
+ _d = &ring->va[i].tx.legacy;
+ *d = *_d;
+ _d->dma.status = TX_DMA_STATUS_DU;
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ memset(ctx, 0, sizeof(*ctx));
+ }
+
+ return -EINVAL;
+}
+
+static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb)
+{
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ int rc;
+
+ spin_lock(&txdata->lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) {
+ wil_dbg_txrx(wil,
+ "suspend/resume in progress. drop packet\n");
+ spin_unlock(&txdata->lock);
+ return -EINVAL;
+ }
+
+ rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
+ (wil, vif, ring, skb);
+
+ spin_unlock(&txdata->lock);
+
+ return rc;
+}
+
+/* Check status of tx vrings and stop/wake net queues if needed
+ * It will start/stop net queues of a specific VIF net_device.
+ *
+ * This function does one of two checks:
+ * In case check_stop is true, will check if net queues need to be stopped. If
+ * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
+ * In case check_stop is false, will check if net queues need to be waked. If
+ * the conditions for waking are met, netif_tx_wake_all_queues() is called.
+ * vring is the vring which is currently being modified by either adding
+ * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
+ * be null when irrelevant (e.g. connect/disconnect events).
+ *
+ * The implementation is to stop net queues if modified vring has low
+ * descriptor availability. Wake if all vrings are not in low descriptor
+ * availability and modified vring has high descriptor availability.
+ */
+static inline void __wil_update_net_queues(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_ring *ring,
+ bool check_stop)
+{
+ int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+
+ if (unlikely(!vif))
+ return;
+
+ if (ring)
+ wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
+ (int)(ring - wil->ring_tx), vif->mid, check_stop,
+ vif->net_queue_stopped);
+ else
+ wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
+ check_stop, vif->mid, vif->net_queue_stopped);
+
+ if (ring && drop_if_ring_full)
+ /* no need to stop/wake net queues */
+ return;
+
+ if (check_stop == vif->net_queue_stopped)
+ /* net queues already in desired state */
+ return;
+
+ if (check_stop) {
+ if (!ring || unlikely(wil_ring_avail_low(ring))) {
+ /* not enough room in the vring */
+ netif_tx_stop_all_queues(vif_to_ndev(vif));
+ vif->net_queue_stopped = true;
+ wil_dbg_txrx(wil, "netif_tx_stop called\n");
+ }
+ return;
+ }
+
+ /* Do not wake the queues in suspend flow */
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status))
+ return;
+
+ /* check wake */
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct wil_ring *cur_ring = &wil->ring_tx[i];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
+
+ if (txdata->mid != vif->mid || !cur_ring->va ||
+ !txdata->enabled || cur_ring == ring)
+ continue;
+
+ if (wil_ring_avail_low(cur_ring)) {
+ wil_dbg_txrx(wil, "ring %d full, can't wake\n",
+ (int)(cur_ring - wil->ring_tx));
+ return;
+ }
+ }
+
+ if (!ring || wil_ring_avail_high(ring)) {
+ /* enough room in the ring */
+ wil_dbg_txrx(wil, "calling netif_tx_wake\n");
+ netif_tx_wake_all_queues(vif_to_ndev(vif));
+ vif->net_queue_stopped = false;
+ }
+}
+
+void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, bool check_stop)
+{
+ spin_lock(&wil->net_queue_lock);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
+ spin_unlock(&wil->net_queue_lock);
+}
+
+void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, bool check_stop)
+{
+ spin_lock_bh(&wil->net_queue_lock);
+ __wil_update_net_queues(wil, vif, ring, check_stop);
+ spin_unlock_bh(&wil->net_queue_lock);
+}
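+
+/* The two wrappers above differ only in lock flavor: the _bh variant
+ * disables bottom halves around net_queue_lock, presumably for callers
+ * that may run in process context, while the plain variant is for paths
+ * where softirqs are already disabled.
+ */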
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ const u8 *da = wil_skb_get_da(skb);
+ bool bcast = is_multicast_ether_addr(da);
+ struct wil_ring *ring;
+ static bool pr_once_fw;
+ int rc;
+
+ wil_dbg_txrx(wil, "start_xmit\n");
+ if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
+ if (!pr_once_fw) {
+ wil_err(wil, "FW not ready\n");
+ pr_once_fw = true;
+ }
+ goto drop;
+ }
+ if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
+ wil_dbg_ratelimited(wil,
+ "VIF not connected, packet dropped\n");
+ goto drop;
+ }
+ if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ pr_once_fw = false;
+
+ /* find vring */
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
+ /* in STA mode (ESS), all to same VRING (to AP) */
+ ring = wil_find_tx_ring_sta(wil, vif, skb);
+ } else if (bcast) {
+ if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
+			/* in pbss, or with multicast-to-unicast conversion
+			 * enabled, the bcast VRING is not used - duplicate
+			 * the skb into all stations' VRINGs
+			 */
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
+ else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
+ /* AP has a dedicated bcast VRING */
+ ring = wil_find_tx_bcast_1(wil, vif, skb);
+ else
+ /* unexpected combination, fallback to duplicating
+ * the skb in all stations VRINGs
+ */
+ ring = wil_find_tx_bcast_2(wil, vif, skb);
+ } else {
+ /* unicast, find specific VRING by dest. address */
+ ring = wil_find_tx_ucast(wil, vif, skb);
+ }
+ if (unlikely(!ring)) {
+ wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_ring(wil, vif, ring, skb);
+
+ switch (rc) {
+ case 0:
+ /* shall we stop net queues? */
+ wil_update_net_queues_bh(wil, vif, ring, true);
+ /* statistics will be updated on the tx_complete */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ if (drop_if_ring_full)
+ goto drop;
+ return NETDEV_TX_BUSY;
+ default:
+ break; /* goto drop; */
+ }
+ drop:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta)
+{
+ int skb_time_us;
+ int bin;
+
+ if (!wil->tx_latency)
+ return;
+
+ if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
+ return;
+
+ skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
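+	/* histogram bin = latency / resolution, clamped to the last bin */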
+ bin = skb_time_us / wil->tx_latency_res;
+ bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
+
+ wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
+ sta->tx_latency_bins[bin]++;
+ sta->stats.tx_latency_total_us += skb_time_us;
+ if (skb_time_us < sta->stats.tx_latency_min_us)
+ sta->stats.tx_latency_min_us = skb_time_us;
+ if (skb_time_us > sta->stats.tx_latency_max_us)
+ sta->stats.tx_latency_max_us = skb_time_us;
+}
+
+/* Clean up transmitted skb's from the Tx VRING
+ *
+ * Return number of descriptors cleared
+ *
+ * Safe to call from IRQ
+ */
+int wil_tx_complete(struct wil6210_vif *vif, int ringid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ring *vring = &wil->ring_tx[ringid];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
+ int done = 0;
+ int cid = wil->ring2cid_tid[ringid][0];
+ struct wil_net_stats *stats = NULL;
+ volatile struct vring_tx_desc *_d;
+ int used_before_complete;
+ int used_new;
+
+ if (unlikely(!vring->va)) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return 0;
+ }
+
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+ return 0;
+ }
+
+ wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
+
+ used_before_complete = wil_ring_used_tx(vring);
+
+ if (cid < wil->max_assoc_sta)
+ stats = &wil->sta[cid].stats;
+
+ while (!wil_ring_is_empty(vring)) {
+ int new_swtail;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ /* For the fragmented skb, HW will set DU bit only for the
+		 * last fragment - look for it.
+ * In TSO the first DU will include hdr desc
+ */
+ int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+ /* TODO: check we are not past head */
+
+ _d = &vring->va[lf].tx.legacy;
+ if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
+ break;
+
+ new_swtail = (lf + 1) % vring->size;
+ while (vring->swtail != new_swtail) {
+ struct vring_tx_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb;
+
+ ctx = &vring->ctx[vring->swtail];
+ skb = ctx->skb;
+ _d = &vring->va[vring->swtail].tx.legacy;
+
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+ d->dma.error);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ ringid, vring->swtail, dmalen,
+ d->dma.status, d->dma.error);
+ wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil->txrx_ops.tx_desc_unmap(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ if (skb) {
+ if (likely(d->dma.error == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ wil_tx_complete_handle_eapol(vif, skb);
+
+ wil_consume_skb(skb, d->dma.error == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_ring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
+ /* There is no need to touch HW descriptor:
+			 * - status bit TX_DMA_STATUS_DU is set by design,
+			 *   so hardware will not try to process this desc.
+			 * - rest of descriptor will be initialized on Tx.
+ */
+ vring->swtail = wil_ring_next_tail(vring);
+ done++;
+ }
+ }
+
+ /* performance monitoring */
+ used_new = wil_ring_used_tx(vring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ringid, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+ /* shall we wake net queues? */
+ if (done)
+ wil_update_net_queues(wil, vif, vring, false);
+
+ return done;
+}
+
+static inline int wil_tx_init(struct wil6210_priv *wil)
+{
+ return 0;
+}
+
+static inline void wil_tx_fini(struct wil6210_priv *wil) {}
+
+static void wil_get_reorder_params(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+
+ *tid = wil_rxdesc_tid(d);
+ *cid = wil_skb_get_cid(skb);
+ *mid = wil_rxdesc_mid(d);
+ *seq = wil_rxdesc_seq(d);
+ *mcast = wil_rxdesc_mcast(d);
+ *retry = wil_rxdesc_retry(d);
+}
+
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation;
+ /* TX ops */
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
+ wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
+ wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
+ wil->txrx_ops.ring_fini_tx = wil_vring_free;
+ wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
+ wil->txrx_ops.tx_init = wil_tx_init;
+ wil->txrx_ops.tx_fini = wil_tx_fini;
+ wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
+ wil->txrx_ops.get_netif_rx_params =
+ wil_get_netif_rx_params;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
+ wil->txrx_ops.rx_fini = wil_rx_fini;
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000..689f68d89
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#include "wil6210.h"
+#include "txrx_edma.h"
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* default size of MAC Tx/Rx buffers */
+#define TXRX_BUF_LEN_DEFAULT (2048)
+
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+
+static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
+{
+ return le32_to_cpu(addr->addr_low) |
+ ((u64)le16_to_cpu(addr->addr_high) << 32);
+}
+
+static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+}
+
+/* Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5 : mac_id_en:1
+ * bit 6..7 : mac_id:2
+ * bit 8..14 : reserved0:7
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_MAC_ID_EN_POS 5
+#define MAC_CFG_DESC_TX_1_MAC_ID_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAC_ID_EN_MSK 0x20
+
+#define MAC_CFG_DESC_TX_1_MAC_ID_POS 6
+#define MAC_CFG_DESC_TX_1_MAC_ID_LEN 2
+#define MAC_CFG_DESC_TX_1_MAC_ID_MSK 0xc0
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+/* Tx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8 : cmd_eop:1 This descriptor is the last one in the packet
+ * bit 9 : cmd_mark_wb:1 (see DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS below)
+ * bit 10 : cmd_dma_it:1 immediate interrupt
+ * bit 11..12 : SBD - Segment Buffer Details
+ * 00 - Header Segment
+ * 01 - First Data Segment
+ * 10 - Medium Data Segment
+ * 11 - Last Data Segment
+ * bit 13 : TSE - TCP Segmentation Enable
+ * bit 14 : IIC - Directs the HW to Insert IPv4 Checksum
+ * bit 15 : ITC - Directs the HW to Insert TCP/UDP Checksum
+ * bit 16..20 : QID - The target QID that the packet should be stored
+ * in the MAC.
+ * bit 21 : PO - Pseudo header Offload:
+ * 0 - Use the pseudo header value from the TCP checksum field
+ *                  1 - Calculate Pseudo header Checksum
+ * bit 22 : NC - No UDP Checksum
+ * bit 23..29 : reserved
+ * bit 30..31 : L4T - Layer 4 Type: 00 - UDP, 10 - TCP, 01, 11 - Reserved
+ *              If L4Len equals 0, there is no L4 at all
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * [byte 12] error
+ * bit 0..2 : mac_status:3
+ * bit 3..7 : reserved:5
+ * [byte 13] status
+ * bit 0    : DU:1 Descriptor Used
+ * bit 1..7 : reserved:7
+ * [word 7] length
+ */
+struct vring_tx_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7; reserved */
+ __le16 length;
+} __packed;
+
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+ wil_tso_type_hdr = 0,
+ wil_tso_type_first = 1,
+ wil_tso_type_mid = 2,
+ wil_tso_type_lst = 3,
+};
+
+/* Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : cid:3 The Source index that was found during parsing the TA.
+ * This field is used to define the source of the packet
+ * bit 7 : MAC_id_valid:1, 1 if MAC virtual number is valid.
+ * bit 8.. 9 : mid:2 The MAC virtual number
+ * bit 10..11 : frame_type:2 : The FC (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1 FC (b14)
+ * bit 8.. 9 : ds_bits:2 FC (b9-8)
+ * bit 10 : a_msdu_present:1 QoS (b7)
+ * bit 11 : a_msdu_type:1 QoS (b8)
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4 this signal tells the DMA to assert an interrupt
+ * after it writes the packet
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3.. 4 : fc_protocol_ver:1 The FC (b1-0) - Protocol Version
+ * bit 5 : fc_order:1 The FC Control (b15) -Order
+ * bit 6.. 7 : qos_ack_policy:2 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1 QoS (b15)
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ struct_group_attr(pn, __packed,
+ u16 pn_15_0;
+ u32 pn_47_16;
+ );
+} __packed;
+
+/* Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length. The field is only valid if
+ * L4I bit is set
+ * bit 8 : cmd_eop:1 set to 1
+ * bit 9 : cmd_rt:1 set to 1
+ * bit 10 : cmd_dma_it:1 immediate interrupt
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14 It is valid when the PII is set.
+ * When the FFM bit is set bits 29-27 are used for
+ * Flex Filter Match. Matching Index to one of the L2
+ * EtherType Flex Filter
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * 00 - UDP, 01 - TCP, 10, 11 - reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8 The field is valid only if the L3I bit is set
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * [byte 12] error
+ * bit 0 : FCS:1
+ * bit 1 : MIC:1
+ * bit 2 : Key miss:1
+ * bit 3 : Replay:1
+ * bit 4 : L3:1 IPv4 checksum
+ * bit 5 : L4:1 TCP/UDP checksum
+ * bit 6..7 : reserved:2
+ * [byte 13] status
+ * bit 0 : DU:1 Descriptor Used
+ * bit 1 : EOP:1 The descriptor indicates the End of Packet
+ * bit 2 : error:1
+ * bit 3 : MI:1 MAC Interrupt is asserted (according to parser decision)
+ * bit 4 : L3I:1 L3 identified and checksum calculated
+ * bit 5 : L4I:1 L4 identified and checksum calculated
+ * bit 6 : PII:1 PHY Info Included in the packet
+ * bit 7 : FFM:1 EtherType Flex Filter Match
+ * [word 7] length
+ */
+
+#define RX_DMA_D0_CMD_DMA_EOP BIT(8)
+#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */
+#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */
+#define RX_MAC_D0_MAC_ID_VALID BIT(7)
+
+/* Error field */
+#define RX_DMA_ERROR_FCS BIT(0)
+#define RX_DMA_ERROR_MIC BIT(1)
+#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */
+#define RX_DMA_ERROR_REPLAY BIT(3)
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
+
+/* Status field */
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_EOP BIT(1)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */
+#define RX_DMA_STATUS_L3I BIT(4)
+#define RX_DMA_STATUS_L4I BIT(5)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */
+
+/* IEEE 802.11, 8.5.2 EAPOL-Key frames */
+#define WIL_KEY_INFO_KEY_TYPE BIT(3) /* val of 1 = Pairwise, 0 = Group key */
+
+#define WIL_KEY_INFO_MIC BIT(8)
+#define WIL_KEY_INFO_ENCR_KEY_DATA BIT(12) /* for rsn only */
+
+#define WIL_EAP_NONCE_LEN 32
+#define WIL_EAP_KEY_RSC_LEN 8
+#define WIL_EAP_REPLAY_COUNTER_LEN 8
+#define WIL_EAP_KEY_IV_LEN 16
+#define WIL_EAP_KEY_ID_LEN 8
+
+enum {
+ WIL_1X_TYPE_EAP_PACKET = 0,
+ WIL_1X_TYPE_EAPOL_START = 1,
+ WIL_1X_TYPE_EAPOL_LOGOFF = 2,
+ WIL_1X_TYPE_EAPOL_KEY = 3,
+};
+
+#define WIL_EAPOL_KEY_TYPE_RSN 2
+#define WIL_EAPOL_KEY_TYPE_WPA 254
+
+struct wil_1x_hdr {
+ u8 version;
+ u8 type;
+ __be16 length;
+ /* followed by data */
+} __packed;
+
+struct wil_eapol_key {
+ u8 type;
+ __be16 key_info;
+ __be16 key_length;
+ u8 replay_counter[WIL_EAP_REPLAY_COUNTER_LEN];
+ u8 key_nonce[WIL_EAP_NONCE_LEN];
+ u8 key_iv[WIL_EAP_KEY_IV_LEN];
+ u8 key_rsc[WIL_EAP_KEY_RSC_LEN];
+ u8 key_id[WIL_EAP_KEY_ID_LEN];
+} __packed;
+
+struct vring_rx_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ __le16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+union wil_tx_desc {
+ struct vring_tx_desc legacy;
+ struct wil_tx_enhanced_desc enhanced;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union wil_rx_desc {
+ struct vring_rx_desc legacy;
+ struct wil_rx_enhanced_desc enhanced;
+} __packed;
+
+union wil_ring_desc {
+ union wil_tx_desc tx;
+ union wil_rx_desc rx;
+} __packed;
+
+struct packet_rx_info {
+ u8 cid;
+};
+
+/* this struct will be stored in the skb cb buffer
+ * max length of the struct is limited to 48 bytes
+ */
+struct skb_rx_info {
+ struct vring_rx_desc rx_desc;
+ struct packet_rx_info rx_info;
+};
+
+static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 0, 3);
+}
+
+static inline int wil_rxdesc_cid(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 4, 6);
+}
+
+static inline int wil_rxdesc_mid(struct vring_rx_desc *d)
+{
+ return (d->mac.d0 & RX_MAC_D0_MAC_ID_VALID) ?
+ WIL_GET_BITS(d->mac.d0, 8, 9) : 0;
+}
+
+static inline int wil_rxdesc_ftype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+static inline int wil_rxdesc_subtype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 12, 15);
+}
+
+/* 1-st byte (with frame type/subtype) of FC field */
+static inline u8 wil_rxdesc_fc1(struct vring_rx_desc *d)
+{
+ return (u8)(WIL_GET_BITS(d->mac.d0, 10, 15) << 2);
+}
+
+static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 16, 27);
+}
+
+static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 28, 31);
+}
+
+static inline int wil_rxdesc_retry(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 31, 31);
+}
+
+static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 4, 5);
+}
+
+static inline int wil_rxdesc_security(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 7, 7);
+}
+
+static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_mcs(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
+{
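+	/* non-zero for both broadcast (d1 bit 13) and multicast (d1 bit 14) */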
+ return WIL_GET_BITS(d->mac.d1, 13, 14);
+}
+
+static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
+
+static inline int wil_ring_is_empty(struct wil_ring *ring)
+{
+ return ring->swhead == ring->swtail;
+}
+
+static inline u32 wil_ring_next_tail(struct wil_ring *ring)
+{
+ return (ring->swtail + 1) % ring->size;
+}
+
+static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
+{
+ ring->swhead = (ring->swhead + n) % ring->size;
+}
+
+static inline int wil_ring_is_full(struct wil_ring *ring)
+{
+ return wil_ring_next_tail(ring) == ring->swhead;
+}
+
+static inline u8 *wil_skb_get_da(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return eth->h_dest;
+}
+
+static inline u8 *wil_skb_get_sa(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return eth->h_source;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ const u8 *da = wil_skb_get_da(skb);
+
+ return is_unicast_ether_addr(da) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/* Used space in Tx ring */
+static inline int wil_ring_used_tx(struct wil_ring *ring)
+{
+ u32 swhead = ring->swhead;
+ u32 swtail = ring->swtail;
+
+ return (ring->size + swhead - swtail) % ring->size;
+}
+
+/* Available space in Tx ring */
+static inline int wil_ring_avail_tx(struct wil_ring *ring)
+{
+ return ring->size - wil_ring_used_tx(ring) - 1;
+}
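+
+/* Example: with size 128, swhead 5 and swtail 120,
+ * used = (128 + 5 - 120) % 128 = 13 and avail = 128 - 13 - 1 = 114;
+ * one slot is always left unused so a full ring can be told apart
+ * from an empty one.
+ */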
+
+static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
+{
+ /* In Enhanced DMA ring 0 is reserved for RX */
+ return wil->use_enhanced_dma_hw ? 1 : 0;
+}
+
+/* similar to the ieee80211_ version, but FC contains only the 1st byte */
+static inline int wil_is_back_req(u8 fc)
+{
+ return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
+
+static inline u8 wil_skb_get_cid(struct sk_buff *skb)
+{
+ struct skb_rx_info *skb_rx_info = (void *)skb->cb;
+
+ return skb_rx_info->rx_info.cid;
+}
+
+static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid)
+{
+ struct skb_rx_info *skb_rx_info = (void *)skb->cb;
+
+ skb_rx_info->rx_info.cid = cid;
+}
+
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
+ struct wil_net_stats *stats, bool gro);
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ u8 cid, u8 tid, u16 seq);
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn);
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r);
+void wil_tx_data_init(struct wil_ring_tx_data *txdata);
+void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
+void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_sta_info *sta);
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 000000000..1ba1f21eb
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1647 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "wil6210.h"
+#include "txrx_edma.h"
+#include "txrx.h"
+#include "trace.h"
+
+/* Max number of entries (packets to complete) to update the hwtail of tx
+ * status ring. Should be a power of 2
+ */
+#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
+#define WIL_EDMA_MAX_DATA_OFFSET (2)
+/* RX buffer size must be aligned to 4 bytes */
+#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+#define MAX_INVALID_BUFF_ID_RETRY (3)
+
+static void wil_tx_desc_unmap_edma(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx)
+{
+ struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
+ dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int wil_find_free_sring(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
+ if (!wil->srings[i].va)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static void wil_sring_free(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+
+ if (!sring || !sring->va)
+ return;
+
+ sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
+ sz, sring->va, &sring->pa);
+
+ dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
+ sring->pa = 0;
+ sring->va = NULL;
+}
+
+static int wil_sring_alloc(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = sring->elem_size * sring->size;
+
+ wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
+
+ if (sz == 0) {
+ wil_err(wil, "Cannot allocate a zero size status ring\n");
+ return -EINVAL;
+ }
+
+ sring->swhead = 0;
+
+ /* Status messages are allocated and initialized to 0. This is necessary
+ * since DR bit should be initialized to 0.
+ */
+ sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ if (!sring->va)
+ return -ENOMEM;
+
+ wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
+ &sring->pa);
+
+ return 0;
+}
+
+static int wil_tx_init_edma(struct wil6210_priv *wil)
+{
+ int ring_id = wil_find_free_sring(wil);
+ struct wil_status_ring *sring;
+ int rc;
+ u16 status_ring_size;
+
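+	/* fall back to the default order for out-of-range values */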
+ if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->tx_status_ring_order;
+
+ wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
+
+ if (ring_id < 0)
+ return ring_id;
+
+ /* Allocate Tx status ring. Tx descriptor rings will be
+ * allocated on WMI connect event
+ */
+ sring = &wil->srings[ring_id];
+
+ sring->is_rx = false;
+ sring->size = status_ring_size;
+ sring->elem_size = sizeof(struct wil_ring_tx_status);
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_tx_sring_cfg(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+ wil->tx_sring_idx = ring_id;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
+/* Allocate one skb for Rx descriptor RING */
+static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
+ struct wil_ring *ring, u32 i)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = wil->rx_buf_len;
+ dma_addr_t pa;
+ u16 buff_id;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ struct wil_rx_buff *rx_buff;
+ struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
+ struct sk_buff *skb;
+ struct wil_rx_enhanced_desc dd, *d = &dd;
+ struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
+ &ring->va[i].rx.enhanced;
+
+ if (unlikely(list_empty(free))) {
+ wil->rx_buff_mgmt.free_list_empty_cnt++;
+ return -EAGAIN;
+ }
+
+ skb = dev_alloc_skb(sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_put(skb, sz);
+
+	/* Make sure the network stack verifies the checksum for packets
+	 * that failed the HW checksum calculation
+	 */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* Get the buffer ID - the index of the rx buffer in the buff_arr */
+ rx_buff = list_first_entry(free, struct wil_rx_buff, list);
+ buff_id = rx_buff->id;
+
+ /* Move a buffer from the free list to the active list */
+ list_move(&rx_buff->list, active);
+
+ buff_arr[buff_id].skb = skb;
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+ d->dma.length = cpu_to_le16(sz);
+ d->mac.buff_id = cpu_to_le16(buff_id);
+ *_d = *d;
+
+ /* Save the physical address in skb->cb for later use in dma_unmap */
+ memcpy(skb->cb, &pa, sizeof(pa));
+
+ return 0;
+}
+
+static inline
+void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
+ void *msg)
+{
+ struct wil_rx_status_compressed *_msg;
+
+ _msg = (struct wil_rx_status_compressed *)
+ (sring->va + (sring->elem_size * sring->swhead));
+ *dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
+ /* make sure dr_bit is read before the rest of status msg */
+ rmb();
+ memcpy(msg, (void *)_msg, sring->elem_size);
+}
+
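+/* SW never clears the descriptor-ready (DR) bit in consumed status
+ * messages. Instead it tracks the expected polarity (desc_rdy_pol) and
+ * flips it on every wrap-around of swhead, so a status message is treated
+ * as new only while its DR bit matches desc_rdy_pol.
+ */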
+static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
+{
+ sring->swhead = (sring->swhead + 1) % sring->size;
+ if (sring->swhead == 0)
+ sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
+}
+
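+/* Refill the Rx descriptor ring: attach an skb to every descriptor from
+ * swhead up to the tail read back from ring->edma_rx_swtail, then publish
+ * the new swhead to HW.
+ */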
+static int wil_rx_refill_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ u32 next_head;
+ int rc = 0;
+
+	ring->swtail = *ring->edma_rx_swtail.va;
+
+ for (; next_head = wil_ring_next_head(ring),
+ (next_head != ring->swtail);
+ ring->swhead = next_head) {
+ rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
+ if (unlikely(rc)) {
+ if (rc == -EAGAIN)
+ wil_dbg_txrx(wil, "No free buffer ID found\n");
+ else
+ wil_err_ratelimited(wil,
+ "Error %d in refill desc[%d]\n",
+ rc, ring->swhead);
+ break;
+ }
+ }
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return rc;
+}
+
+static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ dma_addr_t pa;
+
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
+ while (!list_empty(active)) {
+ struct wil_rx_buff *rx_buff =
+ list_first_entry(active, struct wil_rx_buff, list);
+ struct sk_buff *skb = rx_buff->skb;
+
+ if (unlikely(!skb)) {
+ wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
+ } else {
+ rx_buff->skb = NULL;
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, wil->rx_buf_len,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ }
+
+ /* Move the buffer from the active to the free list */
+ list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
+ }
+}
+
+static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
+	/* Move any buffers still on the active list back to the free list,
+	 * so that all SKBs are released before the array is deleted
+	 */
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+
+ kfree(wil->rx_buff_mgmt.buff_arr);
+ wil->rx_buff_mgmt.buff_arr = NULL;
+}
+
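+/* Allocate the Rx buffer management array that maps buffer IDs reported in
+ * Rx status messages back to their skbs, and place all entries on the
+ * free list.
+ */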
+static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
+ size_t size)
+{
+ struct wil_rx_buff *buff_arr;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
+ struct list_head *free = &wil->rx_buff_mgmt.free;
+ int i;
+
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
+ sizeof(struct wil_rx_buff),
+ GFP_KERNEL);
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return -ENOMEM;
+
+ /* Set list heads */
+ INIT_LIST_HEAD(active);
+ INIT_LIST_HEAD(free);
+
+	/* Link entries 1..size into the free list.
+	 * Buffer ID 0 is not used (it marks an invalid ID).
+	 */
+ buff_arr = wil->rx_buff_mgmt.buff_arr;
+ for (i = 1; i <= size; i++) {
+ list_add(&buff_arr[i].list, free);
+ buff_arr[i].id = i;
+ }
+
+ wil->rx_buff_mgmt.size = size + 1;
+
+ return 0;
+}
+
+static int wil_init_rx_sring(struct wil6210_priv *wil,
+ u16 status_ring_size,
+ size_t elem_size,
+ u16 ring_id)
+{
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+
+ wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
+
+ memset(&sring->rx_data, 0, sizeof(sring->rx_data));
+
+ sring->is_rx = true;
+ sring->size = status_ring_size;
+ sring->elem_size = elem_size;
+ rc = wil_sring_alloc(wil, sring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_sring_add(wil, ring_id);
+ if (rc)
+ goto out_free;
+
+ sring->desc_rdy_pol = 1;
+
+ return 0;
+out_free:
+ wil_sring_free(wil, sring);
+ return rc;
+}
+
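+/* Allocate a descriptor ring and its SW context array. For an Rx ring,
+ * also allocate the small coherent buffer (edma_rx_swtail) used to track
+ * the ring tail.
+ */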
+static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
+ struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = ring->size * sizeof(ring->va[0]);
+
+ wil_dbg_misc(wil, "alloc_desc_ring:\n");
+
+ BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
+
+ ring->swhead = 0;
+ ring->swtail = 0;
+ ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
+ if (!ring->ctx)
+ goto err;
+
+ ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ if (!ring->va)
+ goto err_free_ctx;
+
+ if (ring->is_rx) {
+ sz = sizeof(*ring->edma_rx_swtail.va);
+ ring->edma_rx_swtail.va =
+ dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
+ if (!ring->edma_rx_swtail.va)
+ goto err_free_va;
+ }
+
+ wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
+ ring->is_rx ? "RX" : "TX",
+ ring->size, ring->va, &ring->pa, ring->ctx);
+
+ return 0;
+err_free_va:
+ dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
+ (void *)ring->va, ring->pa);
+ ring->va = NULL;
+err_free_ctx:
+ kfree(ring->ctx);
+ ring->ctx = NULL;
+err:
+ return -ENOMEM;
+}
+
+static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz;
+ int ring_index = 0;
+
+ if (!ring->va)
+ return;
+
+ sz = ring->size * sizeof(ring->va[0]);
+
+ lockdep_assert_held(&wil->mutex);
+ if (ring->is_rx) {
+ wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
+ ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ wil_move_all_rx_buff_to_free_list(wil, ring);
+ dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
+ ring->edma_rx_swtail.va,
+ ring->edma_rx_swtail.pa);
+ goto out;
+ }
+
+ /* TX ring */
+ ring_index = ring - wil->ring_tx;
+
+ wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
+ ring_index, ring->size, ring->va,
+ &ring->pa, ring->ctx);
+
+ while (!wil_ring_is_empty(ring)) {
+ struct wil_ctx *ctx;
+
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_d =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+
+ ctx = &ring->ctx[ring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ ring->swtail);
+ ring->swtail = wil_ring_next_tail(ring);
+ continue;
+ }
+ *d = *_d;
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ ring->swtail = wil_ring_next_tail(ring);
+ }
+
+out:
+ dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
+ kfree(ring->ctx);
+ ring->pa = 0;
+ ring->va = NULL;
+ ring->ctx = NULL;
+}
+
+static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
+ int status_ring_id)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "init RX desc ring\n");
+
+ ring->size = desc_ring_size;
+ ring->is_rx = true;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ return rc;
+
+ rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+out_free:
+ wil_ring_free_edma(wil, ring);
+ return rc;
+}
+
+static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid,
+ int *cid, int *mid, u16 *seq,
+ int *mcast, int *retry)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *tid = wil_rx_status_get_tid(s);
+ *cid = wil_rx_status_get_cid(s);
+ *mid = wil_rx_status_get_mid(s);
+ *seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
+ *mcast = wil_rx_status_get_mcast(s);
+ *retry = wil_rx_status_get_retry(s);
+}
+
+static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
+ int *security)
+{
+ struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
+
+ *cid = wil_rx_status_get_cid(s);
+ *security = wil_rx_status_get_security(s);
+}
+
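+/* SW replay check, used only when HW reordering (and hence the HW crypto
+ * check) is disabled: require the received PN to be strictly greater than
+ * the last PN seen for this CID/TID (or group) and key ID, then record it.
+ */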
+static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wil_rx_status_extended *st;
+ int cid, tid, key_id, mc;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+ struct wil_tid_crypto_rx_single *cc;
+ const u8 *pn;
+
+ /* In HW reorder, HW is responsible for crypto check */
+ if (wil->use_rx_hw_reordering)
+ return 0;
+
+ st = wil_skb_rxstatus(skb);
+
+ cid = wil_rx_status_get_cid(st);
+ tid = wil_rx_status_get_tid(st);
+ key_id = wil_rx_status_get_key_id(st);
+ mc = wil_rx_status_get_mcast(st);
+ s = &wil->sta[cid];
+ c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ cc = &c->key_id[key_id];
+ pn = (u8 *)&st->ext.pn;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
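+/* Return true when no Rx status ring has a pending (ready) status message */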
+static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring;
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u8 dr_bit;
+ int i;
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (!sring->va)
+ continue;
+
+ wil_get_next_rx_status_msg(sring, &dr_bit, msg);
+
+ /* Check if there are unhandled RX status messages */
+ if (dr_bit == sring->desc_rdy_pol)
+ return false;
+ }
+
+ return true;
+}
+
+static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
+{
+ /* RX buffer size must be aligned to 4 bytes */
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
+}
+
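+/* Rx datapath init: allocate the Rx status ring(s), the Rx descriptor ring
+ * and the buffer ID management array, then perform the initial refill.
+ */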
+static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
+{
+ u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ size_t elem_size = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+ int i;
+
+ /* In SW reorder one must use extended status messages */
+ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
+ wil_err(wil,
+ "compressed RX status cannot be used with SW reorder\n");
+ return -EINVAL;
+ }
+ if (wil->rx_status_ring_order <= desc_ring_order)
+ /* make sure sring is larger than desc ring */
+ wil->rx_status_ring_order = desc_ring_order + 1;
+ if (wil->rx_buff_id_count <= desc_ring_size)
+ /* make sure we will not run out of buff_ids */
+ wil->rx_buff_id_count = desc_ring_size + 512;
+ if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
+ wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
+ wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
+
+ status_ring_size = 1 << wil->rx_status_ring_order;
+
+ wil_dbg_misc(wil,
+ "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
+ desc_ring_size, status_ring_size, elem_size);
+
+ wil_rx_buf_len_init_edma(wil);
+
+ /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
+ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
+ wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
+
+ wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
+ wil->num_rx_status_rings);
+
+ rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
+ if (rc)
+ return rc;
+
+ /* Allocate status ring */
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ int sring_id = wil_find_free_sring(wil);
+
+ if (sring_id < 0) {
+ rc = -EFAULT;
+ goto err_free_status;
+ }
+ rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
+ sring_id);
+ if (rc)
+ goto err_free_status;
+ }
+
+ /* Allocate descriptor ring */
+ rc = wil_init_rx_desc_ring(wil, desc_ring_size,
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+ if (rc)
+ goto err_free_status;
+
+ if (wil->rx_buff_id_count >= status_ring_size) {
+ wil_info(wil,
+ "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
+ wil->rx_buff_id_count, status_ring_size,
+ status_ring_size - 1);
+ wil->rx_buff_id_count = status_ring_size - 1;
+ }
+
+ /* Allocate Rx buffer array */
+ rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
+ if (rc)
+ goto err_free_desc;
+
+ /* Fill descriptor ring with credits */
+ rc = wil_rx_refill_edma(wil);
+ if (rc)
+ goto err_free_rx_buff_arr;
+
+ return 0;
+err_free_rx_buff_arr:
+ wil_free_rx_buff_arr(wil);
+err_free_desc:
+ wil_ring_free_edma(wil, ring);
+err_free_status:
+ for (i = 0; i < wil->num_rx_status_rings; i++)
+ wil_sring_free(wil, &wil->srings[i]);
+
+ return rc;
+}
+
+static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_dbg_misc(wil,
+ "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
+ ring_id, cid, tid, wil->tx_sring_idx);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = cid;
+ wil->ring2cid_tid[ring_id][1] = tid;
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
+ goto out_free;
+ }
+
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+ wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
+ wil->ring2cid_tid[ring_id][1] = 0;
+
+ out:
+ return rc;
+}
+
+static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
+ int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_err(wil, "ring modify is not supported for EDMA\n");
+
+ return -EOPNOTSUPP;
+}
+
+/* This function is used only for RX SW reorder */
+static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
+ struct sk_buff *skb, struct wil_net_stats *stats)
+{
+ u8 ftype;
+ u8 fc1;
+ int mid;
+ int tid;
+ u16 seq;
+ struct wil6210_vif *vif;
+
+ ftype = wil_rx_status_get_frame_type(wil, msg);
+ if (ftype == IEEE80211_FTYPE_DATA)
+ return 0;
+
+ fc1 = wil_rx_status_get_fc1(wil, msg);
+ mid = wil_rx_status_get_mid(msg);
+ tid = wil_rx_status_get_tid(msg);
+ seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
+ vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
+ return -EAGAIN;
+ }
+
+ wil_dbg_txrx(wil,
+ "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ if (stats)
+ stats->rx_non_data_frame++;
+ if (wil_is_back_req(fc1)) {
+ wil_dbg_txrx(wil,
+ "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+ wil_rx_bar(wil, vif, cid, tid, seq);
+ } else {
+ u32 sz = wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended);
+
+		/* Print the full info again. This print can be enabled on
+		 * its own, without the overhead of printing every Rx frame
+		 */
+ wil_dbg_txrx(wil,
+ "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+ fc1, mid, cid, tid, seq);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, sz, false);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+ }
+
+ return -EAGAIN;
+}
+
+static int wil_rx_error_check_edma(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ struct wil_net_stats *stats)
+{
+ int l2_rx_status;
+ void *msg = wil_skb_rxstatus(skb);
+
+ l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
+ if (l2_rx_status != 0) {
+ wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
+ l2_rx_status);
+ /* Due to HW issue, KEY error will trigger a MIC error */
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
+ wil_err_ratelimited(wil,
+ "L2 MIC/KEY error, dropping packet\n");
+ stats->rx_mic_error++;
+ }
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
+ wil_err_ratelimited(wil,
+ "L2 KEY error, dropping packet\n");
+ stats->rx_key_error++;
+ }
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
+ wil_err_ratelimited(wil,
+ "L2 REPLAY error, dropping packet\n");
+ stats->rx_replay++;
+ }
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
+ wil_err_ratelimited(wil,
+ "L2 AMSDU error, dropping packet\n");
+ stats->rx_amsdu_error++;
+ }
+ return -EFAULT;
+ }
+
+ skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
+
+ return 0;
+}
+
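+/* Reap one packet from the Rx status ring: consume status messages until
+ * EOP, coalescing multi-descriptor packets into a single skb. Returns the
+ * completed skb, or NULL when no new status message is ready.
+ */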
+static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_rx_status_extended msg1;
+ void *msg = &msg1;
+ u16 buff_id;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ struct wil_ring_rx_data *rxdata = &sring->rx_data;
+ unsigned int sz = wil->rx_buf_len;
+ struct wil_net_stats *stats = NULL;
+ u16 dmalen;
+ int cid;
+ bool eop, headstolen;
+ int delta;
+ u8 dr_bit;
+ u8 data_offset;
+ struct wil_rx_status_extended *s;
+ u16 sring_idx = sring - wil->srings;
+ int invalid_buff_id_retry;
+
+ BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
+
+again:
+ wil_get_next_rx_status_msg(sring, &dr_bit, msg);
+
+ /* Completed handling all the ready status messages */
+ if (dr_bit != sring->desc_rdy_pol)
+ return NULL;
+
+ /* Extract the buffer ID from the status message */
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+
+ invalid_buff_id_retry = 0;
+ while (!buff_id) {
+ struct wil_rx_status_extended *s;
+
+ wil_dbg_txrx(wil,
+ "buff_id is not updated yet by HW, (swhead 0x%x)\n",
+ sring->swhead);
+ if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
+ break;
+
+ /* Read the status message again */
+ s = (struct wil_rx_status_extended *)
+ (sring->va + (sring->elem_size * sring->swhead));
+ *(struct wil_rx_status_extended *)msg = *s;
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ }
+
+ if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
+ wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
+ buff_id, sring->swhead);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ wil_rx_status_reset_buff_id(sring);
+ wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
+ goto again;
+ }
+
+ /* Extract the SKB from the rx_buff management array */
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
+ if (!skb) {
+ wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ wil_rx_status_reset_buff_id(sring);
+ /* Move the buffer from the active list to the free list */
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
+ goto again;
+ }
+
+ wil_rx_status_reset_buff_id(sring);
+ wil_sring_advance_swhead(sring);
+
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
+
+ trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
+ msg);
+ wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
+ buff_id, sring_idx, dmalen);
+ wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ /* Move the buffer from the active list to the free list */
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+
+ eop = wil_rx_status_get_eop(msg);
+
+ cid = wil_rx_status_get_cid(msg);
+ if (unlikely(!wil_val_in_range(cid, 0, wil->max_assoc_sta))) {
+ wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
+ cid, sring->swhead);
+ rxdata->skipping = true;
+ goto skipping;
+ }
+ stats = &wil->sta[cid].stats;
+
+ if (unlikely(dmalen < ETH_HLEN)) {
+ wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen);
+ stats->rx_short_frame++;
+ rxdata->skipping = true;
+ goto skipping;
+ }
+
+ if (unlikely(dmalen > sz)) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
+ stats->rx_large_frame++;
+ rxdata->skipping = true;
+ }
+
+skipping:
+	/* "skipping" indicates whether the current SKB should be dropped.
+	 * It is set when there is an error on the current SKB or during RX
+	 * chaining: as long as the SKBs are merged successfully it stays
+	 * false. Once we hit a bad SKB or fail to merge, it is set to the
+	 * !EOP value of the current SKB, which guarantees that all the
+	 * following SKBs until EOP are dropped as well.
+	 */
+ if (unlikely(rxdata->skipping)) {
+ kfree_skb(skb);
+ if (rxdata->skb) {
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ }
+ rxdata->skipping = !eop;
+ goto again;
+ }
+
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ if (!rxdata->skb) {
+ rxdata->skb = skb;
+ } else {
+ if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
+ &delta))) {
+ kfree_skb_partial(skb, headstolen);
+ } else {
+ wil_err(wil, "failed to merge skbs!\n");
+ kfree_skb(skb);
+ kfree_skb(rxdata->skb);
+ rxdata->skb = NULL;
+ rxdata->skipping = !eop;
+ goto again;
+ }
+ }
+
+ if (!eop)
+ goto again;
+
+ /* reaching here rxdata->skb always contains a full packet */
+ skb = rxdata->skb;
+ rxdata->skb = NULL;
+ rxdata->skipping = false;
+
+ if (stats) {
+ stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
+ if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
+ stats->rx_per_mcs[stats->last_mcs_rx]++;
+ else if (stats->last_mcs_rx == WIL_EXTENDED_MCS_26)
+ stats->rx_per_mcs[WIL_BASE_MCS_FOR_EXTENDED_26]++;
+
+ stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
+ }
+
+ if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
+ wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
+ kfree_skb(skb);
+ goto again;
+ }
+
+ /* Compensate for the HW data alignment according to the status
+ * message
+ */
+ data_offset = wil_rx_status_get_data_offset(msg);
+ if (data_offset == 0xFF ||
+ data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
+ wil_err(wil, "Unexpected data offset %d\n", data_offset);
+ kfree_skb(skb);
+ goto again;
+ }
+
+ skb_pull(skb, data_offset);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ /* Has to be done after dma_unmap_single as skb->cb is also
+ * used for holding the pa
+ */
+ s = wil_skb_rxstatus(skb);
+ memcpy(s, msg, sring->elem_size);
+
+ return skb;
+}
+
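+/* Process up to *quota received packets from all Rx status rings, pass
+ * them to the network stack (or to SW reordering), update the status ring
+ * HW tails and refill the Rx descriptor ring.
+ */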
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev;
+ struct wil_ring *ring = &wil->ring_rx;
+ struct wil_status_ring *sring;
+ struct sk_buff *skb;
+ int i;
+
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "rx_handle\n");
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ sring = &wil->srings[i];
+ if (unlikely(!sring->va)) {
+ wil_err(wil,
+ "Rx IRQ while Rx status ring %d not yet initialized\n",
+ i);
+ continue;
+ }
+
+		while (*quota > 0 &&
+		       (skb = wil_sring_reap_rx_edma(wil, sring))) {
+ (*quota)--;
+ if (wil->use_rx_hw_reordering) {
+ void *msg = wil_skb_rxstatus(skb);
+ int mid = wil_rx_status_get_mid(msg);
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil,
+ "RX desc invalid mid %d",
+ mid);
+ kfree_skb(skb);
+ continue;
+ }
+ ndev = vif_to_ndev(vif);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+ }
+
+ wil_rx_refill_edma(wil);
+}
+
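+/* Fill one enhanced Tx descriptor: 48-bit payload address, length, the
+ * destination queue ID (taken from the ring index) and Ethernet-mode L2
+ * translation with SNAP header insertion.
+ */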
+static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
+ dma_addr_t pa,
+ u32 len,
+ int ring_index)
+{
+ struct wil_tx_enhanced_desc *d =
+ (struct wil_tx_enhanced_desc *)&desc->enhanced;
+
+ memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
+
+ wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
+
+	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4 */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
+ * 3 - eth mode
+ */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static inline void
+wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
+ struct wil_ring_tx_status *msg)
+{
+ struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
+ (sring->va + (sring->elem_size * sring->swhead));
+
+ *dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
+ /* make sure dr_bit is read before the rest of status msg */
+ rmb();
+ *msg = *_msg;
+}
+
+/* Clean up transmitted skb's from the Tx descriptor RING.
+ * Return number of descriptors cleared.
+ */
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring)
+{
+ struct net_device *ndev;
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ring *ring = NULL;
+ struct wil_ring_tx_data *txdata;
+ /* Total number of completed descriptors in all descriptor rings */
+ int desc_cnt = 0;
+ int cid;
+ struct wil_net_stats *stats;
+ struct wil_tx_enhanced_desc *_d;
+ unsigned int ring_id;
+ unsigned int num_descs, num_statuses = 0;
+ int i;
+ u8 dr_bit; /* Descriptor Ready bit */
+ struct wil_ring_tx_status msg;
+ struct wil6210_vif *vif;
+ int used_before_complete;
+ int used_new;
+
+ wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
+
+ /* Process completion messages while DR bit has the expected polarity */
+ while (dr_bit == sring->desc_rdy_pol) {
+ num_descs = msg.num_descriptors;
+ if (!num_descs) {
+ wil_err(wil, "invalid num_descs 0\n");
+ goto again;
+ }
+
+ /* Find the corresponding descriptor ring */
+ ring_id = msg.ring_id;
+
+ if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
+ wil_err(wil, "invalid ring id %d\n", ring_id);
+ goto again;
+ }
+ ring = &wil->ring_tx[ring_id];
+ if (unlikely(!ring->va)) {
+ wil_err(wil, "Tx irq[%d]: ring not initialized\n",
+ ring_id);
+ goto again;
+ }
+ txdata = &wil->ring_tx_data[ring_id];
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
+ goto again;
+ }
+ vif = wil->vifs[txdata->mid];
+ if (unlikely(!vif)) {
+ wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
+ txdata->mid, ring_id);
+ goto again;
+ }
+
+ ndev = vif_to_ndev(vif);
+
+ cid = wil->ring2cid_tid[ring_id][0];
+ stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
+ NULL;
+
+ wil_dbg_txrx(wil,
+ "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+ ring_id, num_descs);
+
+ used_before_complete = wil_ring_used_tx(ring);
+
+		for (i = 0; i < num_descs; ++i) {
+ struct wil_ctx *ctx = &ring->ctx[ring->swtail];
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb = ctx->skb;
+
+ _d = (struct wil_tx_enhanced_desc *)
+ &ring->va[ring->swtail].tx.enhanced;
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
+ ring_id, ring->swtail, dmalen,
+ msg.status);
+ wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)&msg, sizeof(msg),
+ false);
+
+ wil_tx_desc_unmap_edma(dev,
+ (union wil_tx_desc *)d,
+ ctx);
+
+ if (skb) {
+ if (likely(msg.status == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ wil_tx_latency_calc(wil, skb,
+ &wil->sta[cid]);
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ wil_tx_complete_handle_eapol(vif, skb);
+
+ wil_consume_skb(skb, msg.status == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_ring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
+
+ ring->swtail = wil_ring_next_tail(ring);
+
+ desc_cnt++;
+ }
+
+ /* performance monitoring */
+ used_new = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ring_id, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+again:
+ num_statuses++;
+ if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
+ /* update HW tail to allow HW to push new statuses */
+ wil_w(wil, sring->hwtail, sring->swhead);
+
+ wil_sring_advance_swhead(sring);
+
+ wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
+ }
+
+ /* shall we wake net queues? */
+ if (desc_cnt)
+ wil_update_net_queues(wil, vif, NULL, false);
+
+ if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
+ /* Update the HW tail ptr (RD ptr) */
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+
+ return desc_cnt;
+}
+
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
+ int tso_desc_type, bool is_ipv4,
+ int tcp_hdr_len,
+ int skb_net_hdr_len,
+ int mss)
+{
+ /* Number of descriptors */
+ d->mac.d[2] |= 1;
+ /* Maximum Segment Size */
+ d->mac.tso_mss |= cpu_to_le16(mss >> 2);
+ /* L4 header len: TCP header length */
+ d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
+ /* EOP, TSO desc type, Segmentation enable,
+ * Insert IPv4 and TCP / UDP Checksum
+ */
+ d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
+ tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
+ BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
+ /* Calculate pseudo-header */
+ d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
+ BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
+ /* IP Header Length */
+ d->dma.ip_length |= skb_net_hdr_len;
+ /* MAC header length and IP address family*/
+ d->dma.b11 |= ETH_HLEN |
+ is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+}
+
+static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
+ int len, uint i, int tso_desc_type,
+ skb_frag_t *frag, struct wil_ring *ring,
+ struct sk_buff *skb, bool is_ipv4,
+ int tcp_hdr_len, int skb_net_hdr_len,
+ int mss, int *descs_used)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+ struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
+ int ring_index = ring - wil->ring_tx;
+ dma_addr_t pa;
+
+ if (len == 0)
+ return 0;
+
+ if (!frag) {
+ pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_single;
+ } else {
+ pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ ring->ctx[i].mapped_as = wil_mapped_as_page;
+ }
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ wil_err(wil, "TSO: Skb DMA map error\n");
+ return -EINVAL;
+ }
+
+ wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
+ len, ring_index);
+ wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
+ tcp_hdr_len,
+ skb_net_hdr_len, mss);
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ if (tso_desc_type == wil_tso_type_lst)
+ ring->ctx[i].skb = skb_get(skb);
+
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ *_desc = *d;
+ (*descs_used)++;
+
+ return 0;
+}
+
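+/* TSO transmit: one descriptor carries the MAC/IP/TCP headers, one carries
+ * the remaining linear part of the skb, and one descriptor is added per
+ * page fragment; HW segments the payload according to the given MSS.
+ */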
+static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ struct wil_ring *ring,
+ struct sk_buff *skb)
+{
+ int ring_index = ring - wil->ring_tx;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
+ int used, avail = wil_ring_avail_tx(ring);
+ int f, hdrlen, headlen;
+ int gso_type;
+ bool is_ipv4;
+ u32 swhead = ring->swhead;
+ int descs_used = 0; /* total number of used descriptors */
+ int rc = -EINVAL;
+ int tcp_hdr_len;
+ int skb_net_hdr_len;
+ int mss = skb_shinfo(skb)->gso_size;
+
+ wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
+ ring_index);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < min_desc_required)) {
+ wil_err_ratelimited(wil,
+ "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+ ring_index, min_desc_required);
+ return -ENOMEM;
+ }
+
+ gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+ switch (gso_type) {
+ case SKB_GSO_TCPV4:
+ is_ipv4 = true;
+ break;
+ case SKB_GSO_TCPV6:
+ is_ipv4 = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EINVAL;
+
+ /* tcp header length and skb network header length are fixed for all
+ * packet's descriptors - read them once here
+ */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ skb_net_hdr_len = skb_network_header_len(skb);
+
+ /* First descriptor must contain the header only
+ * Header Length = MAC header len + IP header len + TCP header len
+ */
+ hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
+ wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
+ hdrlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
+ wil_tso_type_hdr, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ return -EINVAL;
+
+ /* Second descriptor contains the head */
+ headlen = skb_headlen(skb) - hdrlen;
+ wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
+ rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
+ (swhead + descs_used) % ring->size,
+ (nr_frags != 0) ? wil_tso_type_first :
+ wil_tso_type_lst, NULL, ring, skb,
+ is_ipv4, tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+
+ /* Rest of the descriptors are from the SKB fragments */
+ for (f = 0; f < nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+
+ wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
+ len, descs_used);
+
+ rc = wil_tx_tso_gen_desc(wil, NULL, len,
+ (swhead + descs_used) % ring->size,
+ (f != nr_frags - 1) ?
+ wil_tso_type_mid : wil_tso_type_lst,
+ frag, ring, skb, is_ipv4,
+ tcp_hdr_len, skb_net_hdr_len,
+ mss, &descs_used);
+ if (rc)
+ goto mem_error;
+ }
+
+ /* performance monitoring */
+ used = wil_ring_used_tx(ring);
+ if (wil_val_in_range(wil->ring_idle_trsh,
+ used, used + descs_used)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ ring_index, used, used + descs_used);
+ }
+
+ /* advance swhead */
+ wil_ring_advance_head(ring, descs_used);
+ wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
+ if (wil->tx_latency)
+ *(ktime_t *)&skb->cb = ktime_get();
+ else
+ memset(skb->cb, 0, sizeof(ktime_t));
+
+ wil_w(wil, ring->hwtail, ring->swhead);
+
+ return 0;
+
+mem_error:
+ while (descs_used > 0) {
+ struct device *dev = wil_to_dev(wil);
+ struct wil_ctx *ctx;
+ int i = (swhead + descs_used - 1) % ring->size;
+ struct wil_tx_enhanced_desc dd, *d = &dd;
+ struct wil_tx_enhanced_desc *_desc =
+ (struct wil_tx_enhanced_desc *)
+ &ring->va[i].tx.enhanced;
+
+ *d = *_desc;
+ ctx = &ring->ctx[i];
+ wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
+ memset(ctx, 0, sizeof(*ctx));
+ descs_used--;
+ }
+ return rc;
+}
+
+static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
+ int size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
+ ring_id, wil->tx_sring_idx);
+
+ lockdep_assert_held(&wil->mutex);
+
+ wil_tx_data_init(txdata);
+ ring->size = size;
+ ring->is_rx = false;
+ rc = wil_ring_alloc_desc_ring(wil, ring);
+ if (rc)
+ goto out;
+
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[ring_id][1] = 0; /* TID */
+ if (!vif->privacy)
+ txdata->dot1x_open = true;
+
+ rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
+ if (rc)
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
+ wil_ring_free_edma(wil, ring);
+
+out:
+ return rc;
+}
+
+static void wil_tx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
+
+ wil_dbg_misc(wil, "free TX sring\n");
+
+ wil_sring_free(wil, sring);
+}
+
+static void wil_rx_data_free(struct wil_status_ring *sring)
+{
+ if (!sring)
+ return;
+
+ kfree_skb(sring->rx_data.skb);
+ sring->rx_data.skb = NULL;
+}
+
+static void wil_rx_fini_edma(struct wil6210_priv *wil)
+{
+ struct wil_ring *ring = &wil->ring_rx;
+ int i;
+
+ wil_dbg_misc(wil, "rx_fini_edma\n");
+
+ wil_ring_free_edma(wil, ring);
+
+ for (i = 0; i < wil->num_rx_status_rings; i++) {
+ wil_rx_data_free(&wil->srings[i]);
+ wil_sring_free(wil, &wil->srings[i]);
+ }
+
+ wil_free_rx_buff_arr(wil);
+}
+
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
+{
+ wil->txrx_ops.configure_interrupt_moderation =
+ wil_configure_interrupt_moderation_edma;
+ /* TX ops */
+ wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
+ wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
+ wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
+ wil->txrx_ops.tx_init = wil_tx_init_edma;
+ wil->txrx_ops.tx_fini = wil_tx_fini_edma;
+ wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
+ wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
+ wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+ wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
+ /* RX ops */
+ wil->txrx_ops.rx_init = wil_rx_init_edma;
+ wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
+ wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
+ wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
+ wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
+ wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
+ wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
+ wil->txrx_ops.rx_fini = wil_rx_fini_edma;
+}
+
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 000000000..ee90e225b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef WIL6210_TXRX_EDMA_H
+#define WIL6210_TXRX_EDMA_H
+
+#include "wil6210.h"
+
+/* limit the status ring size order to the range
+ * [min ring size order..max ring size order]
+ */
+#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
+#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
+/* RX sring order should be bigger than RX ring order */
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (14)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600)
+
+#define WIL_DEFAULT_RX_STATUS_RING_ID 0
+#define WIL_RX_DESC_RING_ID 0
+#define WIL_RX_STATUS_IRQ_IDX 0
+#define WIL_TX_STATUS_IRQ_IDX 1
+
+#define WIL_EDMA_AGG_WATERMARK (0xffff)
+#define WIL_EDMA_AGG_WATERMARK_POS (16)
+
+#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
+#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
+
+/* Error field */
+#define WIL_RX_EDMA_ERROR_MIC (1)
+#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */
+#define WIL_RX_EDMA_ERROR_REPLAY (3)
+#define WIL_RX_EDMA_ERROR_AMSDU (4)
+#define WIL_RX_EDMA_ERROR_FCS (7)
+
+#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1))
+#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1))
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11)
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2
+#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4
+
+#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
+
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(20)
+
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
+#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
+
+#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
+#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
+#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
+
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
+#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
+#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
+#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
+
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
+#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
+
+/* Enhanced Rx descriptor - MAC part
+ * [dword 0] : Reserved
+ * [dword 1] : Reserved
+ * [dword 2] : Reserved
+ * [dword 3]
+ * bit 0..15 : Buffer ID
+ * bit 16..31 : Reserved
+ */
+struct wil_ring_rx_enhanced_mac {
+ u32 d[3];
+ __le16 buff_id;
+ u16 reserved;
+} __packed;
+
+/* Enhanced Rx descriptor - DMA part
+ * [dword 0] - Reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..31 : Reserved
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_rx_enhanced_dma {
+ u32 d0;
+ struct wil_ring_dma_addr addr;
+ u16 w5;
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+struct wil_rx_enhanced_desc {
+ struct wil_ring_rx_enhanced_mac mac;
+ struct wil_ring_rx_enhanced_dma dma;
+} __packed;
+
+/* Enhanced Tx descriptor - DMA part
+ * [dword 0]
+ * Same as legacy
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
+ * [dword 2]
+ * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
+ * bit 16..31 : length
+ */
+struct wil_ring_tx_enhanced_dma {
+ u8 l4_hdr_len;
+ u8 cmd;
+ u16 w1;
+ struct wil_ring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ __le16 addr_high_high;
+ __le16 length;
+} __packed;
+
+/* Enhanced Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5..14 : reserved0:10
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..23 : reserved0:2
+ * bit 24 : Dest ID extension:1
+ * bit 25..31 : reserved0:7
+ * [dword 3]
+ * bit 0..15 : tso_mss:16
+ * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
+ */
+struct wil_ring_tx_enhanced_mac {
+ u32 d[3];
+ __le16 tso_mss;
+ u16 scratchpad;
+} __packed;
+
+struct wil_tx_enhanced_desc {
+ struct wil_ring_tx_enhanced_mac mac;
+ struct wil_ring_tx_enhanced_dma dma;
+} __packed;
+
+#define TX_STATUS_DESC_READY_POS 7
+
+/* Enhanced TX status message
+ * [dword 0]
+ * bit 0.. 7 : Number of Descriptors:8 - The number of descriptors that
+ *	       are used to form the packet. It is needed for write-back (WB)
+ *	       when releasing the packet
+ * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
+ * the message
+ * bit 16..23 : Status:8 - The TX status Code
+ * 0x0 - A successful transmission
+ * 0x1 - Retry expired
+ * 0x2 - Lifetime Expired
+ * 0x3 - Released
+ * 0x4-0xFF - Reserved
+ * bit 24..30 : Reserved:7
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ *	     zero by the driver when the ring is created. It is set by the HW
+ *	     to one for each completed status message. On each wrap-around,
+ *	     the DR bit value is flipped.
+ * [dword 1]
+ * bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
+ * [dword 2]
+ * bit 0.. 4 : MCS:5 - The transmitted MCS value
+ * bit 5 : Reserved:1
+ * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
+ * bit 8..12 : QID:5 - The QID that was used for the transmission
+ * bit 13..15 : Reserved:3
+ * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
+ * bit 21..22 : Reserved:2
+ * bit 23 : Retry:1 - An indication that the transmission was retried
+ * bit 24..31 : TX-Sector:8 - the antenna sector that was used for
+ * transmission
+ * [dword 3]
+ * bit 0..11 : Sequence number:12 - The Sequence Number that was used
+ * for the MPDU transmission
+ * bit 12..31 : Reserved:20
+ */
+struct wil_ring_tx_status {
+ u8 num_descriptors;
+ u8 ring_id;
+ u8 status;
+ u8 desc_ready; /* Only the last bit should be set */
+ u32 timestamp;
+ u32 d2;
+ u16 seq_number; /* Only the first 12 bits */
+ u16 w7;
+} __packed;
+
+/* Enhanced Rx status message - compressed part
+ * [dword 0]
+ * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
+ * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
+ * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
+ * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
+ * calculated, Bit1- L3Err - IPv4 Checksum Error
+ * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
+ * calculated, Bit1- L4Err - TCP/UDP Checksum Error
+ * bit 7 : Reserved:1
+ * bit 8..19 : Flow ID:12 - MSDU flow ID
+ * bit 20 : MID_V:1 - The MAC ID field is valid
+ * bit 21..22 : MID:2 - The MAC ID
+ * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
+ * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
+ * bit 25 : BC:1 - The received MPDU is broadcast
+ * bit 26 : MC:1 - The received MPDU is multicast
+ * bit 27 : Raw:1 - The MPDU received with no translation
+ * bit 28 : Sec:1 - The FC control (b14) - Frame Protected
+ * bit 29 : Error:1 - An error is set when (L2 status != 0) ||
+ * (L3 status == 3) || (L4 status == 3)
+ * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
+ * of the transfer, otherwise the status indicates buffer
+ * only completion.
+ * bit 31 : Descriptor Ready bit:1 - It is initialized to
+ *	     zero by the driver when the ring is created. It is set
+ *	     by the HW to one for each completed status message.
+ *	     On each wrap-around, the DR bit value is flipped.
+ * [dword 1]
+ * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
+ * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
+ * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
+ * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
+ * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
+ * bit 24..27 : Data Offset:4 - The data offset, a code that describe the
+ * payload shift from the beginning of the buffer:
+ * 0 - 0 Bytes, 3 - 2 Bytes
+ * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
+ * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
+ * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
+ * bit 31 : Key ID:1 - The extracted Key ID from the encryption header
+ * [dword 2]
+ * bit 0..15 : Buffer ID:16 - The Buffer Identifier
+ * bit 16..31 : Length:16 - It indicates the valid bytes that are stored
+ * in the current descriptor buffer. For multiple buffer
+ * descriptor, SW need to sum the total descriptor length
+ * in all buffers to produce the packet length
+ * [dword 3]
+ * bit 0..31 : timestamp:32 - The MPDU Timestamp.
+ */
+struct wil_rx_status_compressed {
+ u32 d0;
+ u32 d1;
+ __le16 buff_id;
+ __le16 length;
+ u32 timestamp;
+} __packed;
+
+/* Enhanced Rx status message - extension part
+ * [dword 0]
+ * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
+ * from
+ * bit 5.. 7 : Reserved:3
+ * bit 8..11 : TID:4 - The QoS (b3-0) TID Field
+ * bit 12..15 Source index:4 - The Source index that was found
+ during Parsing the TA. This field is used to define the
+ source of the packet
+ * bit 16..18 : Destination index:3 - The Destination index that
+ was found during Parsing the RA.
+ * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
+ * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
+ interrupt after it writes the packet
+ * bit 23 : ESOP:1 - The QoS (b4) ESOP field
+ * bit 24 : RDG:1
+ * bit 25..31 : Reserved:7
+ * [dword 1]
+ * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
+ (management, data, control and extension)
+ * bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
+ * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
+ * Subtype
+ * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
+ * bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit
+ * bit 15..23 : Reserved:9
+ * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
+ * MPDU
+ * [dword 2]
+ * bit 0..11 : SN:12 - The received Sequence number field
+ * bit 12..15 : Reserved:4
+ * bit 16..31 : PN bits [15:0]:16
+ * [dword 3]
+ * bit 0..31 : PN bits [47:16]:32
+ */
+struct wil_rx_status_extension {
+ u32 d0;
+ u32 d1;
+ __le16 seq_num; /* only lower 12 bits */
+ struct_group_attr(pn, __packed,
+ u16 pn_15_0;
+ u32 pn_47_16;
+ );
+} __packed;
+
+struct wil_rx_status_extended {
+ struct wil_rx_status_compressed comp;
+ struct wil_rx_status_extension ext;
+} __packed;
+
+static inline void *wil_skb_rxstatus(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
+
+static inline __le16 wil_rx_status_get_length(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->length;
+}
+
+static inline u8 wil_rx_status_get_mcs(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 16, 21);
+}
+
+static inline u8 wil_rx_status_get_cb_mode(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 22, 23);
+}
+
+static inline u16 wil_rx_status_get_flow_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 8, 19);
+}
+
+static inline u8 wil_rx_status_get_mcast(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 26, 26);
+}
+
+/* Flow ID parsing. On a DLPF lookup miss the flow ID is laid out as:
+ * dest_id:2
+ * src_id :3 - cid
+ * tid:3
+ * On a DLPF lookup hit:
+ * tid:4
+ * cid:4
+ */
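+/* For example, a flow ID of 0x82e has the DLPF miss bit (bit 11) set, so
+ * cid = (0x82e >> 2) & 0x7 = 3 and tid = (0x82e >> 5) & 0x7 = 1.
+ */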
+
+static inline u8 wil_rx_status_get_cid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* CID is in bits 2..4 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* CID is in bits 4..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
+ WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
+}
+
+static inline u8 wil_rx_status_get_tid(void *msg)
+{
+ u16 val = wil_rx_status_get_flow_id(msg);
+
+ if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
+ /* TID is in bits 5..7 */
+ return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
+ WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+ else
+ /* TID is in bits 0..3 */
+ return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
+}
+
+static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 30, 30);
+}
+
+static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s)
+{
+ ((struct wil_rx_status_compressed *)
+ (s->va + (s->elem_size * s->swhead)))->buff_id = 0;
+}
+
+static inline __le16 wil_rx_status_get_buff_id(void *msg)
+{
+ return ((struct wil_rx_status_compressed *)msg)->buff_id;
+}
+
+static inline u8 wil_rx_status_get_data_offset(void *msg)
+{
+ u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 24, 27);
+
+ switch (val) {
+ case 0: return 0;
+ case 3: return 2;
+ default: return 0xFF;
+ }
+}
+
+static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
+ void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return IEEE80211_FTYPE_DATA;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 1) << 2;
+}
+
+static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
+ 0, 5) << 2;
+}
+
+static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
+{
+ if (wil->use_compressed_rx_status)
+ return 0;
+
+ return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
+}
+
+static inline u8 wil_rx_status_get_retry(void *msg)
+{
+ /* retry bit is missing in EDMA HW. return 1 to be on the safe side */
+ return 1;
+}
+
+static inline int wil_rx_status_get_mid(void *msg)
+{
+ if (!(((struct wil_rx_status_compressed *)msg)->d0 &
+ WIL_RX_EDMA_MID_VALID_BIT))
+ return 0; /* use the default MID */
+
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 21, 22);
+}
+
+static inline int wil_rx_status_get_error(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 29, 29);
+}
+
+static inline int wil_rx_status_get_l2_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 0, 2);
+}
+
+static inline int wil_rx_status_get_l3_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 3, 4);
+}
+
+static inline int wil_rx_status_get_l4_rx_status(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 5, 6);
+}
+
+/* L4 L3 Expected result
+ * 0 0 Ok. No L3 and no L4 known protocols found.
+ * Treated as L2 packet. (no offloads on this packet)
+ * 0 1 Ok. It means that L3 was found, and checksum check passed.
+ * No known L4 protocol was found.
+ * 0 2 It means that L3 protocol was found, and checksum check failed.
+ * No L4 known protocol was found.
+ * 1 any Ok. It means that L4 was found, and checksum check passed.
+ * 3 0 Not a possible scenario.
+ * 3 1 Recalculate. It means that L3 protocol was found, and checksum
+ * passed. But L4 checksum failed. Need to see if really failed,
+ * or due to fragmentation.
+ * 3 2 Both L3 and L4 checksum check failed.
+ */
+static inline int wil_rx_status_get_checksum(void *msg,
+ struct wil_net_stats *stats)
+{
+ int l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ int l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+
+ if (l4_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l4_rx_status == 0 && l3_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l3_rx_status == 0 && l4_rx_status == 0)
+ /* L2 packet */
+ return CHECKSUM_NONE;
+
+ /* If HW reports bad checksum, let IP stack re-check it
+ * For example, HW doesn't understand Microsoft IP stack that
+ * mis-calculates TCP checksum - if it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ stats->rx_csum_err++;
+ return CHECKSUM_NONE;
+}
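+
+/* Illustrative summary (added for clarity, derived from the table and the
+ * code above):
+ *   l4_rx_status == 1                    -> CHECKSUM_UNNECESSARY
+ *   l4_rx_status == 0, l3_rx_status == 1 -> CHECKSUM_UNNECESSARY
+ *   l4_rx_status == 0, l3_rx_status == 0 -> CHECKSUM_NONE (plain L2 packet)
+ *   any other combination                -> CHECKSUM_NONE, rx_csum_err++
+ */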
+
+static inline int wil_rx_status_get_security(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
+ 28, 28);
+}
+
+static inline u8 wil_rx_status_get_key_id(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 31, 31);
+}
+
+static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
+{
+ return WIL_GET_BITS(msg->d2, 0, 4);
+}
+
+static inline u32 wil_ring_next_head(struct wil_ring *ring)
+{
+ return (ring->swhead + 1) % ring->size;
+}
+
+static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
+ __le16 *addr_high_high,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+ *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
+}
+
+static inline
+dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
+
+static inline
+dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
+{
+ return le32_to_cpu(dma->addr.addr_low) |
+ ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
+ ((u64)le16_to_cpu(dma->addr_high_high) << 48);
+}
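+
+/* Illustrative example (added for clarity, not in the original source):
+ * for pa == 0x000123456789abcd, wil_desc_set_addr_edma() stores
+ *   addr_low       = 0x6789abcd  (bits  0..31)
+ *   addr_high      = 0x2345      (bits 32..47)
+ *   addr_high_high = 0x0001      (bits 48..63)
+ * and wil_{tx,rx}_desc_get_addr_edma() reassemble the same 64-bit address.
+ */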
+
+void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
+int wil_tx_sring_handler(struct wil6210_priv *wil,
+ struct wil_status_ring *sring);
+void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
+void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
+
+#endif /* WIL6210_TXRX_EDMA_H */
+
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000..22a6eb3e1
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,1449 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/timex.h>
+#include <linux/types.h>
+#include <linux/irqreturn.h>
+#include "wmi.h"
+#include "wil_platform.h"
+#include "fw.h"
+
+extern bool no_fw_recovery;
+extern unsigned int mtu_max;
+extern unsigned short rx_ring_overflow_thrsh;
+extern int agg_wsize;
+extern bool rx_align_2;
+extern bool rx_large_buf;
+extern bool debug_fw;
+extern bool disable_ap_sme;
+extern bool ftm_mode;
+extern bool drop_if_ring_full;
+extern uint max_assoc_sta;
+
+struct wil6210_priv;
+struct wil6210_vif;
+union wil_tx_desc;
+
+#define WIL_NAME "wil6210"
+
+#define WIL_FW_NAME_DEFAULT "wil6210.fw"
+#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
+
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
+#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+
+#define WIL_FW_NAME_TALYN "wil6436.fw"
+#define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw"
+#define WIL_BRD_NAME_TALYN "wil6436.brd"
+
+#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
+
+#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
+#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+
+#define WIL_NUM_LATENCY_BINS 200
+
+/* maximum number of virtual interfaces the driver supports
+ * (including the main interface)
+ */
+#define WIL_MAX_VIFS 4
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * @b0 must be <= @b1, otherwise the result is incorrect
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
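+
+/* Illustrative example (added for clarity): for x == 0xabcd,
+ * WIL_GET_BITS(x, 4, 7) == 0xc and WIL_GET_BITS(x, 0, 3) == 0xd.
+ */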
+
+#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
+#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
+
+#define WIL_TX_Q_LEN_DEFAULT (4000)
+#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT (11)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12)
+#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
+#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
+/* limit ring size in range [32..32k] */
+#define WIL_RING_SIZE_ORDER_MIN (5)
+#define WIL_RING_SIZE_ORDER_MAX (15)
+#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
+#define WIL6210_MAX_CID (20) /* max number of stations */
+#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */
+#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
+#define WIL6210_MAX_STATUS_RINGS (8)
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
+#define WIL_EXTENDED_MCS_26 (26) /* FW reports MCS 12.1 to driver as "26" */
+#define WIL_BASE_MCS_FOR_EXTENDED_26 (7) /* MCS 7 is base MCS for MCS 12.1 */
+#define WIL_EXTENDED_MCS_CHECK(x) (((x) == WIL_EXTENDED_MCS_26) ? "12.1" : #x)
+
+/* Hardware offload block adds the following:
+ * 26 bytes - 3-address QoS data header
+ * 8 bytes - IV + EIV (for GCMP)
+ * 8 bytes - SNAP
+ * 16 bytes - MIC (for GCMP)
+ * 4 bytes - CRC
+ */
+#define WIL_MAX_MPDU_OVERHEAD (62)
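+/* (26 + 8 + 8 + 16 + 4 = 62 bytes, matching the components listed above) */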
+
+struct wil_suspend_count_stats {
+ unsigned long successful_suspends;
+ unsigned long successful_resumes;
+ unsigned long failed_suspends;
+ unsigned long failed_resumes;
+};
+
+struct wil_suspend_stats {
+ struct wil_suspend_count_stats r_off;
+ struct wil_suspend_count_stats r_on;
+ unsigned long rejected_by_device; /* only radio on */
+ unsigned long rejected_by_host;
+};
+
+/* Calculate MAC buffer size for the firmware. It includes all overhead,
+ * as the frame will go over the air, and needs to be 8-byte aligned
+ */
+static inline u32 wil_mtu2macbuf(u32 mtu)
+{
+ return ALIGN(mtu + WIL_MAX_MPDU_OVERHEAD, 8);
+}
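+
+/* Illustrative example (added for clarity): for mtu == 1500,
+ * wil_mtu2macbuf() returns ALIGN(1500 + 62, 8) == 1568.
+ */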
+
+/* The Ethernet MTU needs to take into account the 8-byte SNAP header
+ * added when encapsulating an Ethernet frame into 802.11
+ */
+#define WIL_MAX_ETH_MTU (IEEE80211_MAX_DATA_LEN_DMG - 8)
+/* Maximum interrupt threshold value supported by wil6210 is 5 sec. */
+#define WIL6210_ITR_TRSH_MAX (5000000)
+#define WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
+#define WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
+#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
+#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
+#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
+#define WIL6210_DISCONNECT_TO_MS (2000)
+#define WIL6210_RX_HIGH_TRSH_INIT (0)
+#define WIL6210_RX_HIGH_TRSH_DEFAULT \
+ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
+#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see
+ * 802.11REVmc/D5.0, section 9.4.1.8)
+ */
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr) (fwaddr - WIL6210_FW_HOST_OFF)
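+
+/* Illustrative example (added for clarity): per the mapping above, a FW
+ * address in the user_rgf block such as 0x880004 maps to host offset
+ * HOSTADDR(0x880004) == 0x000004, used relative to wil->csr in
+ * wil_r()/wil_w() below.
+ */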
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt is controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USAGE_1 (0x880004)
+#define RGF_USER_USAGE_2 (0x880008)
+#define RGF_USER_USAGE_6 (0x880018)
+ #define BIT_USER_OOB_MODE BIT(31)
+ #define BIT_USER_OOB_R2_MODE BIT(30)
+#define RGF_USER_USAGE_8 (0x880020)
+ #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
+ #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
+ #define BIT_USER_EXT_CLK BIT(2)
+#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
+ #define HW_MACHINE_BOOT_DONE (0x3fffffd)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+ #define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_CPU_PC (0x8801e8)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+ #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_BL (0x880A3C) /* Boot Loader */
+#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
+#define RGF_USER_FW_CALIB_RESULT (0x880a90) /* b0-7:result
+ * b8-15:signature
+ */
+ #define CALIB_RESULT_SIGNATURE (0x11)
+#define RGF_USER_CLKS_CTL_0 (0x880abc)
+ #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */
+ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+ #define BIT_HPAL_PERST_FROM_PAD BIT(6)
+ #define BIT_CAR_PERST_RST BIT(7)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18)
+#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
+#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
+ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
+#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
+ #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
+ #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
+#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
+#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
+#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
+#define RGF_USER_XPM_IFC_RD_TIME4 (0x880cf8)
+#define RGF_USER_XPM_IFC_RD_TIME5 (0x880cfc)
+#define RGF_USER_XPM_IFC_RD_TIME6 (0x880d00)
+#define RGF_USER_XPM_IFC_RD_TIME7 (0x880d04)
+#define RGF_USER_XPM_IFC_RD_TIME8 (0x880d08)
+#define RGF_USER_XPM_IFC_RD_TIME9 (0x880d0c)
+#define RGF_USER_XPM_IFC_RD_TIME10 (0x880d10)
+#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+ #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+ #define BIT_DMA_EP_RX_ICR_RX_HTRSH BIT(1)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_HALP BIT(27)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
+
+/* Legacy interrupt moderation control (before Sparrow v2) */
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881c64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* Offload control (Sparrow B0+) */
+#define RGF_DMA_OFUL_NID_0 (0x881cd4)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN BIT(0)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN BIT(1)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC BIT(2)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC BIT(3)
+
+/* New (sparrow v2+) interrupt moderation control */
+#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
+#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
+#define RGF_DMA_ITR_TX_CNT_DATA (0x881d38)
+#define RGF_DMA_ITR_TX_CNT_CTL (0x881d3c)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_TX_IDL_CNT_TRSH (0x881d60)
+#define RGF_DMA_ITR_TX_IDL_CNT_DATA (0x881d64)
+#define RGF_DMA_ITR_TX_IDL_CNT_CTL (0x881d68)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_ITR_RX_DESQ_NO_MOD (0x881d50)
+#define RGF_DMA_ITR_RX_CNT_TRSH (0x881d44)
+#define RGF_DMA_ITR_RX_CNT_DATA (0x881d48)
+#define RGF_DMA_ITR_RX_CNT_CTL (0x881d4c)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_RX_IDL_CNT_TRSH (0x881d54)
+#define RGF_DMA_ITR_RX_IDL_CNT_DATA (0x881d58)
+#define RGF_DMA_ITR_RX_IDL_CNT_CTL (0x881d5c)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_MISC_CTL (0x881d6c)
+ #define BIT_OFUL34_RDY_VALID_BUG_FIX_EN BIT(7)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_HP_CTRL (0x88265c)
+#define RGF_PAL_UNIT_ICR (0x88266c) /* struct RGF_ICR */
+#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
+
+/* MAC timer, usec, for packet lifetime */
+#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
+
+#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */
+#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
+#define RGF_CAF_OSC_CONTROL (0x88afa4)
+ #define BIT_CAF_OSC_XTAL_EN BIT(0)
+#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
+ #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+
+#define RGF_OTP_QC_SECURED (0x8a0038)
+ #define BIT_BOOT_FROM_ROM BIT(31)
+
+/* eDMA */
+#define RGF_SCM_PTRS_SUBQ_RD_PTR (0x8b4000)
+#define RGF_SCM_PTRS_COMPQ_RD_PTR (0x8b4100)
+#define RGF_DMA_SCM_SUBQ_CONS (0x8b60ec)
+#define RGF_DMA_SCM_COMPQ_PROD (0x8b616c)
+
+#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
+
+#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
+#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
+#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
+
+#define RGF_INT_GEN_CTRL (0x8bc0ec)
+ #define BIT_CONTROL_0 BIT(0)
+
+/* eDMA status interrupts */
+#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
+ #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
+#define RGF_INT_GEN_TX_ICR (0x8bc110)
+ #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
+#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
+#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
+
+#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
+
+#define USER_EXT_USER_PMU_3 (0x88d00c)
+ #define BIT_PMU_DEVICE_RDY BIT(0)
+
+#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
+ #define JTAG_DEV_ID_SPARROW (0x2632072f)
+ #define JTAG_DEV_ID_TALYN (0x7e0e1)
+ #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
+
+#define RGF_USER_REVISION_ID (0x88afe4)
+#define RGF_USER_REVISION_ID_MASK (3)
+ #define REVISION_ID_SPARROW_B0 (0x0)
+ #define REVISION_ID_SPARROW_D0 (0x3)
+
+#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
+#define RGF_OTP_OEM_MAC (0x8a0334)
+#define RGF_OTP_MAC (0x8a0620)
+
+/* Talyn-MB */
+#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
+#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
+
+/* crash codes for FW/Ucode stored here */
+
+/* ASSERT RGFs */
+#define SPARROW_RGF_FW_ASSERT_CODE (0x91f020)
+#define SPARROW_RGF_UCODE_ASSERT_CODE (0x91f028)
+#define TALYN_RGF_FW_ASSERT_CODE (0xa37020)
+#define TALYN_RGF_UCODE_ASSERT_CODE (0xa37028)
+
+enum {
+ HW_VER_UNKNOWN,
+ HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
+ HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
+ HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
+ HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
+};
+
+/* popular locations */
+#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX HOSTADDR(RGF_MBOX)
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+
+#define WIL_DATA_COMPLETION_TO_MS 200
+
+/* Hardware definitions end */
+#define SPARROW_FW_MAPPING_TABLE_SIZE 10
+#define TALYN_FW_MAPPING_TABLE_SIZE 13
+#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
+#define MAX_FW_MAPPING_TABLE_SIZE 19
+
+/* Common representation of physical address in wil ring */
+struct wil_ring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
+
+struct fw_map {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+ const char *name; /* for debugfs */
+ bool fw; /* true if FW mapping, false if UCODE mapping */
+ bool crash_dump; /* true if should be dumped during crash dump */
+};
+
+/* array sizes should be in sync with the actual definitions in wmi.c */
+extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map sparrow_d0_mac_rgf_ext;
+extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
+extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
+/**
+ * mk_cidxtid - construct @cidxtid field
+ * @cid: CID value
+ * @tid: TID value
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline u8 mk_cidxtid(u8 cid, u8 tid)
+{
+ return ((tid & 0xf) << 4) | (cid & 0xf);
+}
+
+/**
+ * parse_cidxtid - parse @cidxtid field
+ * @cid: store CID value here
+ * @tid: store TID value here
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
+{
+ *cid = cidxtid & 0xf;
+ *tid = (cidxtid >> 4) & 0xf;
+}
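+
+/* Illustrative example (added for clarity): mk_cidxtid(3, 5) == 0x53,
+ * and parse_cidxtid(0x53, &cid, &tid) yields cid == 3, tid == 5.
+ */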
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wmi_cmd_hdr wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+enum { /* for wil_ctx.mapped_as */
+ wil_mapped_as_none = 0,
+ wil_mapped_as_single = 1,
+ wil_mapped_as_page = 2,
+};
+
+/**
+ * struct wil_ctx - software context for ring descriptor
+ */
+struct wil_ctx {
+ struct sk_buff *skb;
+ u8 nr_frags;
+ u8 mapped_as;
+};
+
+struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
+ u32 *va;
+ dma_addr_t pa;
+};
+
+/**
+ * A general ring structure, used for RX and TX.
+ * In legacy DMA it represents the vring;
+ * in enhanced DMA it represents the descriptor ring (vrings are handled by FW)
+ */
+struct wil_ring {
+ dma_addr_t pa;
+ volatile union wil_ring_desc *va;
+ u16 size; /* number of wil_ring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ struct wil_ctx *ctx; /* ctx[size] - software context */
+ struct wil_desc_ring_rx_swtail edma_rx_swtail;
+ bool is_rx;
+};
+
+/**
+ * Additional data for Rx ring.
+ * Used for enhanced DMA RX chaining.
+ */
+struct wil_ring_rx_data {
+ /* the skb being assembled */
+ struct sk_buff *skb;
+ /* true if we are skipping a bad fragmented packet */
+ bool skipping;
+ u16 buff_size;
+};
+
+/**
+ * Status ring structure, used for enhanced DMA completions for RX and TX.
+ */
+struct wil_status_ring {
+ dma_addr_t pa;
+ void *va; /* pointer to ring_[tr]x_status elements */
+ u16 size; /* number of status elements */
+ size_t elem_size; /* status element size in bytes */
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ bool is_rx;
+ u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
+ struct wil_ring_rx_data rx_data;
+ u32 invalid_buff_id_cnt; /* relevant only for RX */
+};
+
+#define WIL_STA_TID_NUM (16)
+#define WIL_MCS_MAX (15) /* Maximum MCS supported */
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ u32 tx_latency_min_us;
+ u32 tx_latency_max_us;
+ u64 tx_latency_total_us;
+ unsigned long rx_dropped;
+ unsigned long rx_non_data_frame;
+ unsigned long rx_short_frame;
+ unsigned long rx_large_frame;
+ unsigned long rx_replay;
+ unsigned long rx_mic_error;
+ unsigned long rx_key_error; /* eDMA specific */
+ unsigned long rx_amsdu_error; /* eDMA specific */
+ unsigned long rx_csum_err;
+ u16 last_mcs_rx;
+ u8 last_cb_mode_rx;
+ u64 rx_per_mcs[WIL_MCS_MAX + 1];
+ u32 ft_roams; /* relevant in STA mode */
+};
+
+/**
+ * struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
+ * DMA flow
+ */
+struct wil_txrx_ops {
+ void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
+ /* TX ops */
+ int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
+ int size, int cid, int tid);
+ void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
+ int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
+ int (*tx_init)(struct wil6210_priv *wil);
+ void (*tx_fini)(struct wil6210_priv *wil);
+ int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
+ u32 len, int ring_index);
+ void (*tx_desc_unmap)(struct device *dev,
+ union wil_tx_desc *desc,
+ struct wil_ctx *ctx);
+ int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, struct sk_buff *skb);
+ int (*tx_ring_modify)(struct wil6210_vif *vif, int ring_id,
+ int cid, int tid);
+ irqreturn_t (*irq_tx)(int irq, void *cookie);
+ /* RX ops */
+ int (*rx_init)(struct wil6210_priv *wil, uint ring_order);
+ void (*rx_fini)(struct wil6210_priv *wil);
+ int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
+ void (*get_reorder_params)(struct wil6210_priv *wil,
+ struct sk_buff *skb, int *tid, int *cid,
+ int *mid, u16 *seq, int *mcast, int *retry);
+ void (*get_netif_rx_params)(struct sk_buff *skb,
+ int *cid, int *security);
+ int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
+ int (*rx_error_check)(struct wil6210_priv *wil, struct sk_buff *skb,
+ struct wil_net_stats *stats);
+ bool (*is_rx_idle)(struct wil6210_priv *wil);
+ irqreturn_t (*irq_rx)(int irq, void *cookie);
+};
+
+/**
+ * Additional data for Tx ring
+ */
+struct wil_ring_tx_data {
+ bool dot1x_open;
+ int enabled;
+ cycles_t idle, last_idle, begin;
+ u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
+ u16 agg_timeout;
+ u8 agg_amsdu;
+ bool addba_in_progress; /* if set, agg_xxx is for request in progress */
+ u8 mid;
+ spinlock_t lock;
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0, /* FW operational */
+ wil_status_dontscan,
+ wil_status_mbox_ready, /* MBOX structures ready */
+ wil_status_irqen, /* interrupts enabled - for debug */
+ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+ wil_status_resetting, /* reset in progress */
+ wil_status_suspending, /* suspend in progress */
+ wil_status_suspended, /* suspend completed, device is suspended */
+ wil_status_resuming, /* resume in progress */
+ wil_status_last /* keep last */
+};
+
+struct pci_dev;
+
+/**
+ * struct tid_ampdu_rx - TID aggregation information (Rx).
+ *
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @last_rx: jiffies of last rx activity
+ * @head_seq_num: head sequence number in reordering buffer.
+ * @stored_mpdu_num: number of MPDUs in reordering buffer
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
+ * @first_time: true the first time this buffer is used
+ * @mcast_last_seq: sequence number (SN) of last received multicast packet
+ * @drop_dup_mcast: duplicate multicast frames dropped for this reorder buffer
+ */
+struct wil_tid_ampdu_rx {
+ struct sk_buff **reorder_buf;
+ unsigned long last_rx;
+ u16 head_seq_num;
+ u16 stored_mpdu_num;
+ u16 ssn;
+ u16 buf_size;
+ u16 ssn_last_drop;
+ unsigned long long total; /* frames processed */
+ unsigned long long drop_dup;
+ unsigned long long drop_old;
+ bool first_time; /* is it 1-st time this buffer used? */
+ u16 mcast_last_seq; /* multicast dup detection */
+ unsigned long long drop_dup_mcast;
+};
+
+/**
+ * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
+ *
+ * @pn: GCMP PN for the session
+ * @key_set: valid key present
+ */
+struct wil_tid_crypto_rx_single {
+ u8 pn[IEEE80211_GCMP_PN_LEN];
+ bool key_set;
+};
+
+struct wil_tid_crypto_rx {
+ struct wil_tid_crypto_rx_single key_id[4];
+};
+
+struct wil_p2p_info {
+ struct ieee80211_channel listen_chan;
+ u8 discovery_started;
+ u64 cookie;
+ struct wireless_dev *pending_listen_wdev;
+ unsigned int listen_duration;
+ struct timer_list discovery_timer; /* listen/search duration */
+ struct work_struct discovery_expired_work; /* listen/search expire */
+ struct work_struct delayed_listen_work; /* listen after scan done */
+};
+
+enum wil_sta_status {
+ wil_sta_unused = 0,
+ wil_sta_conn_pending = 1,
+ wil_sta_connected = 2,
+};
+
+enum wil_rekey_state {
+ WIL_REKEY_IDLE = 0,
+ WIL_REKEY_M3_RECEIVED = 1,
+ WIL_REKEY_WAIT_M4_SENT = 2,
+};
+
+/**
+ * struct wil_sta_info - data for peer
+ *
+ * Peer identified by its CID (connection ID)
+ * NIC performs beam forming for each peer;
+ * if no beam forming done, frame exchange is not
+ * possible.
+ */
+struct wil_sta_info {
+ u8 addr[ETH_ALEN];
+ u8 mid;
+ enum wil_sta_status status;
+ struct wil_net_stats stats;
+ /**
+ * 20 latency bins. 1st bin counts packets with latency
+ * of 0..tx_latency_res, last bin counts packets with latency
+ * of 19*tx_latency_res and above.
+ * tx_latency_res is configured from "tx_latency" debug-fs.
+ */
+ u64 *tx_latency_bins;
+ struct wmi_link_stats_basic fw_stats_basic;
+ /* Rx BACK */
+ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
+ spinlock_t tid_rx_lock; /* guarding tid_rx array */
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
+ struct wil_tid_crypto_rx group_crypto_rx;
+ u8 aid; /* 1-254; 0 if unknown/not reported */
+};
+
+enum {
+ fw_recovery_idle = 0,
+ fw_recovery_pending = 1,
+ fw_recovery_running = 2,
+};
+
+enum {
+ hw_capa_no_flash,
+ hw_capa_last
+};
+
+struct wil_probe_client_req {
+ struct list_head list;
+ u64 cookie;
+ u8 cid;
+};
+
+struct pmc_ctx {
+ /* alloc, free, and read operations must own the lock */
+ struct mutex lock;
+ struct vring_tx_desc *pring_va;
+ dma_addr_t pring_pa;
+ struct desc_alloc_info *descriptors;
+ int last_cmd_status;
+ int num_descriptors;
+ int descriptor_size;
+};
+
+struct wil_halp {
+ struct mutex lock; /* protect halp ref_cnt */
+ unsigned int ref_cnt;
+ struct completion comp;
+ u8 handle_icr;
+};
+
+struct wil_blob_wrapper {
+ struct wil6210_priv *wil;
+ struct debugfs_blob_wrapper blob;
+};
+
+#define WIL_LED_MAX_ID (2)
+#define WIL_LED_INVALID_ID (0xF)
+#define WIL_LED_BLINK_ON_SLOW_MS (300)
+#define WIL_LED_BLINK_OFF_SLOW_MS (300)
+#define WIL_LED_BLINK_ON_MED_MS (200)
+#define WIL_LED_BLINK_OFF_MED_MS (200)
+#define WIL_LED_BLINK_ON_FAST_MS (100)
+#define WIL_LED_BLINK_OFF_FAST_MS (100)
+enum {
+ WIL_LED_TIME_SLOW = 0,
+ WIL_LED_TIME_MED,
+ WIL_LED_TIME_FAST,
+ WIL_LED_TIME_LAST,
+};
+
+struct blink_on_off_time {
+ u32 on_ms;
+ u32 off_ms;
+};
+
+struct wil_debugfs_iomem_data {
+ void *offset;
+ struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+ struct wil_debugfs_iomem_data *data_arr;
+ int iomem_data_count;
+};
+
+extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
+extern u8 led_id;
+extern u8 led_polarity;
+
+enum wil6210_vif_status {
+ wil_vif_fwconnecting,
+ wil_vif_fwconnected,
+ wil_vif_ft_roam,
+ wil_vif_status_last /* keep last */
+};
+
+struct wil6210_vif {
+ struct wireless_dev wdev;
+ struct net_device *ndev;
+ struct wil6210_priv *wil;
+ u8 mid;
+ DECLARE_BITMAP(status, wil_vif_status_last);
+ u32 privacy; /* secure connection? */
+ u16 channel; /* relevant in AP mode */
+ u8 wmi_edmg_channel; /* relevant in AP mode */
+ u8 hidden_ssid; /* relevant in AP mode */
+ u32 ap_isolate; /* no intra-BSS communication */
+ bool pbss;
+ int bi;
+ u8 *proberesp, *proberesp_ies, *assocresp_ies;
+ size_t proberesp_len, proberesp_ies_len, assocresp_ies_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u8 gtk_index;
+ u8 gtk[WMI_MAX_KEY_LEN];
+ size_t gtk_len;
+ int bcast_ring;
+ struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
+ int locally_generated_disc; /* relevant in STA mode */
+ struct timer_list connect_timer;
+ struct work_struct disconnect_worker;
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+ struct timer_list scan_timer; /* detect scan timeout */
+ struct wil_p2p_info p2p;
+ /* keep alive */
+ struct list_head probe_client_pending;
+ struct mutex probe_client_mutex; /* protect @probe_client_pending */
+ struct work_struct probe_client_worker;
+ int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
+ bool fw_stats_ready; /* per-cid statistics are ready inside sta_info */
+ u64 fw_stats_tsf; /* measurement timestamp */
+
+ /* PTK rekey race prevention, this is relevant to station mode only */
+ enum wil_rekey_state ptk_rekey_state;
+ struct work_struct enable_tx_key_worker;
+};
+
+/**
+ * RX buffer allocated for enhanced DMA RX descriptors
+ */
+struct wil_rx_buff {
+ struct sk_buff *skb;
+ struct list_head list;
+ int id;
+};
+
+/**
+ * During Rx completion processing, the driver extracts a buffer ID which
+ * is used as an index into the rx_buff_mgmt.buff_arr array. The SKB is then
+ * given to the network stack and the buffer is moved from the 'active'
+ * list to the 'free' list.
+ * During Rx refill, SKBs are attached to free buffers and moved to the
+ * 'active' list.
+ */
+struct wil_rx_buff_mgmt {
+ struct wil_rx_buff *buff_arr;
+ size_t size; /* number of items in buff_arr */
+ struct list_head active;
+ struct list_head free;
+ unsigned long free_list_empty_cnt; /* statistics */
+};
+
+struct wil_fw_stats_global {
+ bool ready;
+ u64 tsf; /* measurement timestamp */
+ struct wmi_link_stats_global stats;
+};
+
+struct wil_brd_info {
+ u32 file_addr;
+ u32 file_max_size;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ u32 bar_size;
+ struct wiphy *wiphy;
+ struct net_device *main_ndev;
+ int n_msi;
+ void __iomem *csr;
+ DECLARE_BITMAP(status, wil_status_last);
+ u8 fw_version[ETHTOOL_FWVERS_LEN];
+ u32 hw_version;
+ u8 chip_revision;
+ const char *hw_name;
+ const char *wil_fw_name;
+ char *board_file;
+ u32 num_of_brd_entries;
+ struct wil_brd_info *brd_info;
+ DECLARE_BITMAP(hw_capa, hw_capa_last);
+ DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
+ u32 recovery_count; /* num of FW recovery attempts in a short time */
+ u32 recovery_state; /* FW recovery state machine */
+ unsigned long last_fw_recovery; /* jiffies of last fw recovery */
+ wait_queue_head_t wq; /* for all wait_event() use */
+ u8 max_vifs; /* maximum number of interfaces, including main */
+ struct wil6210_vif *vifs[WIL_MAX_VIFS];
+ struct mutex vif_mutex; /* protects access to VIF entries */
+ atomic_t connected_vifs;
+ u32 max_assoc_sta; /* max sta's supported by the driver and the FW */
+
+ /* profile */
+ struct cfg80211_chan_def monitor_chandef;
+ u32 monitor_flags;
+ int sinfo_gen;
+ /* interrupt moderation */
+ u32 tx_max_burst_duration;
+ u32 tx_interframe_timeout;
+ u32 rx_max_burst_duration;
+ u32 rx_interframe_timeout;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ struct completion wmi_call;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ u8 reply_mid;
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wq_service;
+ struct work_struct fw_error_worker; /* for FW error recovery */
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+ * - fill in IRQ from wil6210_irq_misc,
+ * - consumed in thread by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ spinlock_t net_queue_lock; /* guarding stop/wake netif queue */
+ spinlock_t eap_lock; /* guarding access to eap rekey fields */
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct net_device napi_ndev; /* dummy net_device serving all VIFs */
+
+ /* DMA related */
+ struct wil_ring ring_rx;
+ unsigned int rx_buf_len;
+ struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
+ struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
+ struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
+ u8 num_rx_status_rings;
+ int tx_sring_idx;
+ u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_sta_info sta[WIL6210_MAX_CID];
+ u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
+ u32 dma_addr_size; /* indicates dma addr size */
+ struct wil_rx_buff_mgmt rx_buff_mgmt;
+ bool use_enhanced_dma_hw;
+ struct wil_txrx_ops txrx_ops;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* for synchronizing device memory access while reset or suspend */
+ struct rw_semaphore mem_lock;
+ /* statistics */
+ atomic_t isr_count_rx, isr_count_tx;
+ /* debugfs */
+ struct dentry *debug;
+ struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE];
+ u8 discovery_mode;
+ u8 abft_len;
+ u8 wakeup_trigger;
+ struct wil_suspend_stats suspend_stats;
+ struct wil_debugfs_data dbg_data;
+ bool tx_latency; /* collect TX latency measurements */
+ size_t tx_latency_res; /* bin resolution in usec */
+
+ void *platform_handle;
+ struct wil_platform_ops platform_ops;
+ bool keep_radio_on_during_sleep;
+
+ struct pmc_ctx pmc;
+
+ u8 p2p_dev_started;
+
+ /* P2P_DEVICE vif */
+ struct wireless_dev *p2p_wdev;
+ struct wireless_dev *radio_wdev;
+
+ /* High Access Latency Policy voting */
+ struct wil_halp halp;
+
+ enum wmi_ps_profile_type ps_profile;
+
+ int fw_calib_result;
+
+ struct notifier_block pm_notify;
+
+ bool suspend_resp_rcvd;
+ bool suspend_resp_comp;
+ u32 bus_request_kbps;
+ u32 bus_request_kbps_pre_suspend;
+
+ u32 rgf_fw_assert_code_addr;
+ u32 rgf_ucode_assert_code_addr;
+ u32 iccm_base;
+
+ /* relevant only for eDMA */
+ bool use_compressed_rx_status;
+ u32 rx_status_ring_order;
+ u32 tx_status_ring_order;
+ u32 rx_buff_id_count;
+ bool amsdu_en;
+ bool use_rx_hw_reordering;
+ bool secured_boot;
+ u8 boot_config;
+
+ struct wil_fw_stats_global fw_stats_global;
+
+ u32 max_agg_wsize;
+ u32 max_ampdu_size;
+ u8 multicast_to_unicast;
+ s32 cqm_rssi_thold;
+};
+
+#define wil_to_wiphy(i) (i->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+#define ndev_to_vif(n) (struct wil6210_vif *)(netdev_priv(n))
+#define vif_to_wil(v) (v->wil)
+#define vif_to_ndev(v) (v->ndev)
+#define vif_to_wdev(v) (&v->wdev)
+#define GET_MAX_VIFS(wil) min_t(int, (wil)->max_vifs, WIL_MAX_VIFS)
+
+static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil,
+ struct wireless_dev *wdev)
+{
+ /* main interface is shared with P2P device */
+ if (wdev == wil->p2p_wdev)
+ return ndev_to_vif(wil->main_ndev);
+ else
+ return container_of(wdev, struct wil6210_vif, wdev);
+}
+
+static inline struct wireless_dev *
+vif_to_radio_wdev(struct wil6210_priv *wil, struct wil6210_vif *vif)
+{
+ /* main interface is shared with P2P device */
+ if (vif->mid)
+ return vif_to_wdev(vif);
+ else
+ return wil->radio_wdev;
+}
+
+__printf(2, 3)
+void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...);
+#define wil_dbg(wil, fmt, arg...) do { \
+ netdev_dbg(wil->main_ndev, fmt, ##arg); \
+ wil_dbg_trace(wil, fmt, ##arg); \
+} while (0)
+
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+#define wil_err(wil, fmt, arg...) __wil_err(wil, "%s: " fmt, __func__, ##arg)
+#define wil_info(wil, fmt, arg...) __wil_info(wil, "%s: " fmt, __func__, ##arg)
+#define wil_err_ratelimited(wil, fmt, arg...) \
+ __wil_err_ratelimited(wil, "%s: " fmt, __func__, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+ return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ writel(val, wil->csr + HOSTADDR(reg));
+ wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+ wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
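+
+/* Illustrative usage (added for clarity, not part of the original source),
+ * given a valid struct wil6210_priv *wil:
+ *   u32 rev = wil_r(wil, RGF_USER_FW_REV_ID);   read the chip revision reg
+ *   wil_s(wil, some_reg, BIT(0));               ORs BIT(0) into the register
+ *   wil_c(wil, some_reg, BIT(0));               clears BIT(0) again
+ * where 'some_reg' is a placeholder for any RGF_* register address defined
+ * above.
+ */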
+
+/**
+ * wil_cid_valid - check cid is valid
+ */
+static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid)
+{
+ return (cid >= 0 && cid < wil->max_assoc_sta && cid < WIL6210_MAX_CID);
+}
+
+void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[MISC]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+static inline
+void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
+
+static inline
+void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
+
+static inline
+void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+int wil_mem_access_lock(struct wil6210_priv *wil);
+void wil_mem_access_unlock(struct wil6210_priv *wil);
+
+struct wil6210_vif *
+wil_vif_alloc(struct wil6210_priv *wil, const char *name,
+ unsigned char name_assign_type, enum nl80211_iftype iftype);
+void wil_vif_free(struct wil6210_vif *vif);
+void *wil_if_alloc(struct device *dev);
+bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
+ struct net_device *ndev, bool up, bool ok);
+bool wil_has_active_ifaces(struct wil6210_priv *wil, bool up, bool ok);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_vif_remove(struct wil6210_priv *wil, u8 mid);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_ps_update(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile);
+int wil_reset(struct wil6210_priv *wil, bool no_fw);
+void wil_fw_error_recovery(struct wil6210_priv *wil);
+void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+bool wil_is_recovery_blocked(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int __wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac);
+int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx);
+void wil_set_ethtoolops(struct net_device *ndev);
+
+struct fw_map *wil_find_fw_mapping(const char *section);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
+ u16 reply_id, void *reply, u16 reply_size, int to_msec);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_del_cipher_key(struct wil6210_vif *vif, u8 key_index,
+ const void *mac_addr, int key_usage);
+int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
+int wmi_rxon(struct wil6210_priv *wil, bool on);
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+ struct wmi_temp_sense_all_done_event
+ *sense_all_evt);
+int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
+ bool del_sta);
+int wmi_addba(struct wil6210_priv *wil, u8 mid,
+ u8 ringid, u8 size, u16 timeout);
+int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason);
+int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason);
+int wmi_addba_rx_resp(struct wil6210_priv *wil,
+ u8 mid, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
+int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile);
+int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
+int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
+int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid);
+int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
+ const u8 *mac, enum nl80211_iftype iftype);
+int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl);
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
+
+void wil6210_clear_irq(struct wil6210_priv *wil);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil_mask_irq(struct wil6210_priv *wil);
+void wil_unmask_irq(struct wil6210_priv *wil);
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
+void wil_disable_irq(struct wil6210_priv *wil);
+void wil_enable_irq(struct wil6210_priv *wil);
+void wil6210_mask_halp(struct wil6210_priv *wil);
+
+/* P2P */
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
+int wil_p2p_search(struct wil6210_vif *vif,
+ struct cfg80211_scan_request *request);
+int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
+ unsigned int duration, struct ieee80211_channel *chan,
+ u64 *cookie);
+u8 wil_p2p_stop_discovery(struct wil6210_vif *vif);
+int wil_p2p_cancel_listen(struct wil6210_vif *vif, u64 cookie);
+void wil_p2p_listen_expired(struct work_struct *work);
+void wil_p2p_search_expired(struct work_struct *work);
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
+void wil_p2p_delayed_listen_work(struct work_struct *work);
+
+/* WMI for P2P */
+int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi);
+int wmi_start_listen(struct wil6210_vif *vif);
+int wmi_start_search(struct wil6210_vif *vif);
+int wmi_stop_discovery(struct wil6210_vif *vif);
+
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie);
+void wil_cfg80211_ap_recovery(struct wil6210_priv *wil);
+int wil_cfg80211_iface_combinations_from_fw(
+ struct wil6210_priv *wil,
+ const struct wil_fw_record_concurrency *conc);
+int wil_vif_prepare_stop(struct wil6210_vif *vif);
+
+#if defined(CONFIG_WIL6210_DEBUGFS)
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+#else
+static inline int wil6210_debugfs_init(struct wil6210_priv *wil) { return 0; }
+static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {}
+#endif
+
+int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
+ struct station_info *sinfo);
+
+struct wil6210_priv *wil_cfg80211_init(struct device *dev);
+void wil_cfg80211_deinit(struct wil6210_priv *wil);
+void wil_p2p_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr);
+int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan,
+ u8 edmg_chan, u8 hidden_ssid, u8 is_go);
+int wmi_pcp_stop(struct wil6210_vif *vif);
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
+int wmi_abort_scan(struct wil6210_vif *vif);
+void wil_abort_scan(struct wil6210_vif *vif, bool sync);
+void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync);
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
+void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code);
+void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code);
+void wil_probe_client_flush(struct wil6210_vif *vif);
+void wil_probe_client_worker(struct work_struct *work);
+void wil_disconnect_worker(struct work_struct *work);
+void wil_enable_tx_key_worker(struct work_struct *work);
+
+void wil_init_txrx_ops(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
+int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
+int wil_bcast_init(struct wil6210_vif *vif);
+void wil_bcast_fini(struct wil6210_vif *vif);
+void wil_bcast_fini_all(struct wil6210_priv *wil);
+
+void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, bool should_stop);
+void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
+ struct wil_ring *ring, bool check_stop);
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int wil_tx_complete(struct wil6210_vif *vif, int ringid);
+void wil_tx_complete_handle_eapol(struct wil6210_vif *vif,
+ struct sk_buff *skb);
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil, int *quota);
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load);
+int wil_request_board(struct wil6210_priv *wil, const char *name);
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+bool wil_is_wmi_idle(struct wil6210_priv *wil);
+int wmi_resume(struct wil6210_priv *wil);
+int wmi_suspend(struct wil6210_priv *wil);
+bool wil_is_tx_idle(struct wil6210_priv *wil);
+
+int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
+void wil_fw_core_dump(struct wil6210_priv *wil);
+
+void wil_halp_vote(struct wil6210_priv *wil);
+void wil_halp_unvote(struct wil6210_priv *wil);
+void wil6210_set_halp(struct wil6210_priv *wil);
+void wil6210_clear_halp(struct wil6210_priv *wil);
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms);
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
+
+int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch);
+int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch);
+void wil_update_supported_bands(struct wil6210_priv *wil);
+
+int reverse_memcmp(const void *cs, const void *ct, size_t count);
+
+/* WMI for enhanced DMA */
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
+ u16 max_rx_pl_per_desc);
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid);
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
+ u8 tid, u8 token, u16 status, bool amsdu,
+ u16 agg_wsize, u16 timeout);
+
+void update_supported_bands(struct wil6210_priv *wil);
+
+void wil_clear_fw_log_addr(struct wil6210_priv *wil);
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst);
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
new file mode 100644
index 000000000..89c12cb2a
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "wil6210.h"
+#include <linux/devcoredump.h>
+
+static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
+ u32 *out_dump_size, u32 *out_host_min)
+{
+ int i;
+ const struct fw_map *map;
+ u32 host_min, host_max, tmp_max;
+
+ if (!out_dump_size)
+ return -EINVAL;
+
+ /* calculate the total size of the unpacked crash dump */
+ BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0);
+ map = &fw_mapping[0];
+ host_min = map->host;
+ host_max = map->host + (map->to - map->from);
+
+ for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
+ map = &fw_mapping[i];
+
+ if (!map->crash_dump)
+ continue;
+
+ if (map->host < host_min)
+ host_min = map->host;
+
+ tmp_max = map->host + (map->to - map->from);
+ if (tmp_max > host_max)
+ host_max = tmp_max;
+ }
+
+ *out_dump_size = host_max - host_min;
+ if (out_host_min)
+ *out_host_min = host_min;
+
+ return 0;
+}
+
+int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
+{
+ int i;
+ const struct fw_map *map;
+ void *data;
+ u32 host_min, dump_size, offset, len;
+
+ if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
+ wil_err(wil, "fail to obtain crash dump size\n");
+ return -EINVAL;
+ }
+
+ if (dump_size > size) {
+ wil_err(wil, "not enough space for dump. Need %d have %d\n",
+ dump_size, size);
+ return -EINVAL;
+ }
+
+ down_write(&wil->mem_lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil,
+ "suspend/resume in progress. cannot copy crash dump\n");
+ up_write(&wil->mem_lock);
+ return -EBUSY;
+ }
+
+ /* copy to crash dump area */
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ map = &fw_mapping[i];
+
+ if (!map->crash_dump)
+ continue;
+
+ data = (void * __force)wil->csr + HOSTADDR(map->host);
+ len = map->to - map->from;
+ offset = map->host - host_min;
+
+ wil_dbg_misc(wil,
+ "fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
+ fw_mapping[i].name, len, offset);
+
+ wil_memcpy_fromio_32((void * __force)(dest + offset),
+ (const void __iomem * __force)data, len);
+ }
+
+ up_write(&wil->mem_lock);
+
+ return 0;
+}
+
+void wil_fw_core_dump(struct wil6210_priv *wil)
+{
+ void *fw_dump_data;
+ u32 fw_dump_size;
+
+ if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
+ wil_err(wil, "fail to get fw dump size\n");
+ return;
+ }
+
+ fw_dump_data = vzalloc(fw_dump_size);
+ if (!fw_dump_data)
+ return;
+
+ if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) {
+ vfree(fw_dump_data);
+ return;
+ }
+	/* fw_dump_data will be freed in the device coredump release function
+ * after 5 min
+ */
+ dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
+ wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
new file mode 100644
index 000000000..e152dc29d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ */
+
+#include <linux/device.h>
+#include "wil_platform.h"
+
+int __init wil_platform_modinit(void)
+{
+ return 0;
+}
+
+void wil_platform_modexit(void)
+{
+}
+
+/* wil_platform_init() - wil6210 platform module init
+ *
+ * The function must be called before all other functions in this module.
+ * It returns a handle which is used with the rest of the API.
+ */
+void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops,
+ const struct wil_platform_rops *rops, void *wil_handle)
+{
+ void *handle = ops; /* to return some non-NULL for 'void' impl. */
+
+ if (!ops) {
+ dev_err(dev,
+ "Invalid parameter. Cannot init platform module\n");
+ return NULL;
+ }
+
+ /* platform specific init functions should be called here */
+
+ return handle;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
new file mode 100644
index 000000000..5ff662022
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ */
+
+#ifndef __WIL_PLATFORM_H__
+#define __WIL_PLATFORM_H__
+
+struct device;
+
+enum wil_platform_event {
+ WIL_PLATFORM_EVT_FW_CRASH = 0,
+ WIL_PLATFORM_EVT_PRE_RESET = 1,
+ WIL_PLATFORM_EVT_FW_RDY = 2,
+ WIL_PLATFORM_EVT_PRE_SUSPEND = 3,
+ WIL_PLATFORM_EVT_POST_SUSPEND = 4,
+};
+
+enum wil_platform_features {
+ WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_TRIPLE_MSI = 1,
+ WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+ WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+ WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+ WIL_PLATFORM_CAPA_EXT_CLK = 2,
+ WIL_PLATFORM_CAPA_MAX,
+};
+
+/**
+ * struct wil_platform_ops - wil platform module calls from this
+ * driver to platform driver
+ */
+struct wil_platform_ops {
+ int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
+ int (*suspend)(void *handle, bool keep_device_power);
+ int (*resume)(void *handle, bool device_powered_on);
+ void (*uninit)(void *handle);
+ int (*notify)(void *handle, enum wil_platform_event evt);
+ int (*get_capa)(void *handle);
+ void (*set_features)(void *handle, int features);
+};
+
+/**
+ * struct wil_platform_rops - wil platform module callbacks from
+ * platform driver to this driver
+ * @ramdump: store a ramdump from the wil firmware. The platform
+ * driver may add additional data to the ramdump to
+ * generate the final crash dump.
+ * @fw_recovery: start a firmware recovery process. Called as
+ * part of a crash recovery process which may include other
+ * related platform subsystems.
+ */
+struct wil_platform_rops {
+ int (*ramdump)(void *wil_handle, void *buf, uint32_t size);
+ int (*fw_recovery)(void *wil_handle);
+};
+
+/**
+ * wil_platform_init - initialize the platform driver
+ *
+ * @dev: pointer to the wil6210 device
+ * @ops: structure with platform driver operations. Platform
+ * driver will fill this structure with function pointers.
+ * @rops: structure with callbacks from platform driver to
+ * this driver. The platform driver copies the structure to
+ * its own storage. Can be NULL if this driver does not
+ * support crash recovery.
+ * @wil_handle: context for this driver that will be passed
+ * when platform driver invokes one of the callbacks in
+ * rops. May be NULL if rops is NULL.
+ */
+void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops,
+ const struct wil_platform_rops *rops, void *wil_handle);
+
+int __init wil_platform_modinit(void);
+void wil_platform_modexit(void);
+
+#endif /* __WIL_PLATFORM_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000..6a5976a29
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,4054 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "wmi.h"
+#include "trace.h"
+
+/* set the default max assoc sta to max supported by driver */
+uint max_assoc_sta = WIL6210_MAX_CID;
+module_param(max_assoc_sta, uint, 0444);
+MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
+
+int agg_wsize; /* = 0; */
+module_param(agg_wsize, int, 0644);
+MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
+ " 0 - use default; < 0 - don't auto-establish");
+
+u8 led_id = WIL_LED_INVALID_ID;
+module_param(led_id, byte, 0444);
+MODULE_PARM_DESC(led_id,
+ " 60G device led enablement. Set the led ID (0-2) to enable");
+
+#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_PCP_STOP_TO_MS 5000
+
+/**
+ * DOC: WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
+ * invoked for the misc IRQ, and it calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then, the work queue @wil->wmi_wq wakes
+ * up and handles events within @wmi_event_worker. Every event gets detached
+ * from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, that second flow
+ * would never complete because the IRQ thread is blocked.
+ */
+
+/**
+ * DOC: Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ * - MAC CPU (ucode)
+ * - User CPU (firmware)
+ * - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2MB (Sparrow) or 4MB (Talyn)
+ * size, exposing AHB addresses starting from 0x880000.
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible from the host. To read from these addresses, an
+ * alternative AHB address must be used.
+ */
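+
+/* Example (Sparrow): FW data linker address 0x800100 is visible on the
+ * AHB/PCI at 0x900100, i.e. at BAR0 offset 0x900100 - 0x880000 = 0x80100.
+ */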
+
+/* sparrow_fw_mapping provides memory remapping table for sparrow
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Sparrow memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x808000 0x900000 .. 0x908000 32k DCCM
+ * 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH
+ */
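+/* each entry is {from, to, host, name, fw, crash_dump}; see struct fw_map */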
+const struct fw_map sparrow_fw_mapping[] = {
+ /* FW code RAM 256k */
+ {0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
+ /* FW data RAM 32k */
+ {0x800000, 0x808000, 0x900000, "fw_data", true, true},
+ /* periph data 128k */
+ {0x840000, 0x860000, 0x908000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 512b */
+ {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
+ /* upper area 548k */
+ {0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 128k */
+ {0x000000, 0x020000, 0x920000, "uc_code", false, false},
+ /* ucode data RAM 16k */
+ {0x800000, 0x804000, 0x940000, "uc_data", false, false},
+};
+
+/* sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
+ * it is a bit larger to support extra features
+ */
+const struct fw_map sparrow_d0_mac_rgf_ext = {
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
+};
+
+/* talyn_fw_mapping provides memory remapping table for Talyn
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Talyn memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_fw_mapping[] = {
+ /* FW code RAM 1M */
+ {0x000000, 0x100000, 0x900000, "fw_code", true, true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 1344b */
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+/* talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Talyn MB memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_mb_fw_mapping[] = {
+ /* FW code RAM 768k */
+ {0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true, true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
+ /* mac_ext_rgf 2256b */
+ {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
+ /* SEC PKA 16k */
+ {0x890000, 0x894000, 0x890000, "sec_pka", true, true},
+ /* SEC KDF RGF 3096b */
+ {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
+ /* SEC MAIN 2124b */
+ {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
+ /* DUM USER RGF 528b */
+ {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
+ /* DMA OFU 296b */
+ {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
+ /* ucode debug 256b */
+ {0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true, true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
+};
+
+struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
+struct blink_on_off_time led_blink_time[] = {
+ {WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
+ {WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
+ {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
+};
+
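+/* 802.11 authentication frame body, without the management header */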
+struct auth_no_hdr {
+ __le16 auth_alg;
+ __le16 auth_transaction;
+ __le16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[];
+} __packed;
+
+u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
+
+/**
+ * wmi_addr_remap - return AHB address for given firmware internal (linker) address
+ * @x: internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
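+		/* only FW sections remap; keep offset within the section */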
+ if (fw_mapping[i].fw &&
+ ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
+
+/**
+ * wil_find_fw_mapping - find fw_mapping entry by section name
+ * @section: section name
+ *
+ * Return pointer to section or NULL if not found
+ */
+struct fw_map *wil_find_fw_mapping(const char *section)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
+ if (fw_mapping[i].name &&
+ !strcmp(section, fw_mapping[i].name))
+ return &fw_mapping[i];
+
+ return NULL;
+}
+
+/**
+ * wmi_buffer_block - Check address validity for WMI buffer; remap if needed
+ * @wil: driver data
+ * @ptr_: internal (linker) fw/ucode address
+ * @size: if non-zero, validate that the block does not
+ * exceed the device memory (BAR)
+ *
+ * A valid buffer must be DWORD aligned.
+ *
+ * Return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > wil->bar_size - 4)
+ return NULL;
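+	/* the second check below catches u32 wrap-around of off + size */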
+ if (size && ((off + size > wil->bar_size) || (off + size < off)))
+ return NULL;
+
+ return wil->csr + off;
+}
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ return wmi_buffer_block(wil, ptr_, 0);
+}
+
+/* Check address validity */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > wil->bar_size - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static const char *cmdid2name(u16 cmdid)
+{
+ switch (cmdid) {
+ case WMI_NOTIFY_REQ_CMDID:
+ return "WMI_NOTIFY_REQ_CMD";
+ case WMI_START_SCAN_CMDID:
+ return "WMI_START_SCAN_CMD";
+ case WMI_CONNECT_CMDID:
+ return "WMI_CONNECT_CMD";
+ case WMI_DISCONNECT_CMDID:
+ return "WMI_DISCONNECT_CMD";
+ case WMI_SW_TX_REQ_CMDID:
+ return "WMI_SW_TX_REQ_CMD";
+ case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+ case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_BRP_SET_ANT_LIMIT_CMDID:
+ return "WMI_BRP_SET_ANT_LIMIT_CMD";
+ case WMI_TOF_SESSION_START_CMDID:
+ return "WMI_TOF_SESSION_START_CMD";
+ case WMI_AOA_MEAS_CMDID:
+ return "WMI_AOA_MEAS_CMD";
+ case WMI_PMC_CMDID:
+ return "WMI_PMC_CMD";
+ case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+ case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+ case WMI_VRING_CFG_CMDID:
+ return "WMI_VRING_CFG_CMD";
+ case WMI_BCAST_VRING_CFG_CMDID:
+ return "WMI_BCAST_VRING_CFG_CMD";
+ case WMI_TRAFFIC_SUSPEND_CMDID:
+ return "WMI_TRAFFIC_SUSPEND_CMD";
+ case WMI_TRAFFIC_RESUME_CMDID:
+ return "WMI_TRAFFIC_RESUME_CMD";
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMD";
+ case WMI_SET_MAC_ADDRESS_CMDID:
+ return "WMI_SET_MAC_ADDRESS_CMD";
+ case WMI_LED_CFG_CMDID:
+ return "WMI_LED_CFG_CMD";
+ case WMI_PCP_START_CMDID:
+ return "WMI_PCP_START_CMD";
+ case WMI_PCP_STOP_CMDID:
+ return "WMI_PCP_STOP_CMD";
+ case WMI_SET_SSID_CMDID:
+ return "WMI_SET_SSID_CMD";
+ case WMI_GET_SSID_CMDID:
+ return "WMI_GET_SSID_CMD";
+ case WMI_SET_PCP_CHANNEL_CMDID:
+ return "WMI_SET_PCP_CHANNEL_CMD";
+ case WMI_GET_PCP_CHANNEL_CMDID:
+ return "WMI_GET_PCP_CHANNEL_CMD";
+ case WMI_P2P_CFG_CMDID:
+ return "WMI_P2P_CFG_CMD";
+ case WMI_PORT_ALLOCATE_CMDID:
+ return "WMI_PORT_ALLOCATE_CMD";
+ case WMI_PORT_DELETE_CMDID:
+ return "WMI_PORT_DELETE_CMD";
+ case WMI_START_LISTEN_CMDID:
+ return "WMI_START_LISTEN_CMD";
+ case WMI_START_SEARCH_CMDID:
+ return "WMI_START_SEARCH_CMD";
+ case WMI_DISCOVERY_STOP_CMDID:
+ return "WMI_DISCOVERY_STOP_CMD";
+ case WMI_DELETE_CIPHER_KEY_CMDID:
+ return "WMI_DELETE_CIPHER_KEY_CMD";
+ case WMI_ADD_CIPHER_KEY_CMDID:
+ return "WMI_ADD_CIPHER_KEY_CMD";
+ case WMI_SET_APPIE_CMDID:
+ return "WMI_SET_APPIE_CMD";
+ case WMI_CFG_RX_CHAIN_CMDID:
+ return "WMI_CFG_RX_CHAIN_CMD";
+ case WMI_TEMP_SENSE_CMDID:
+ return "WMI_TEMP_SENSE_CMD";
+ case WMI_DEL_STA_CMDID:
+ return "WMI_DEL_STA_CMD";
+ case WMI_DISCONNECT_STA_CMDID:
+ return "WMI_DISCONNECT_STA_CMD";
+ case WMI_RING_BA_EN_CMDID:
+ return "WMI_RING_BA_EN_CMD";
+ case WMI_RING_BA_DIS_CMDID:
+ return "WMI_RING_BA_DIS_CMD";
+ case WMI_RCP_DELBA_CMDID:
+ return "WMI_RCP_DELBA_CMD";
+ case WMI_RCP_ADDBA_RESP_CMDID:
+ return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
+ return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
+ case WMI_PS_DEV_PROFILE_CFG_CMDID:
+ return "WMI_PS_DEV_PROFILE_CFG_CMD";
+ case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_ABORT_SCAN_CMDID:
+ return "WMI_ABORT_SCAN_CMD";
+ case WMI_NEW_STA_CMDID:
+ return "WMI_NEW_STA_CMD";
+ case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+ case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+ case WMI_START_SCHED_SCAN_CMDID:
+ return "WMI_START_SCHED_SCAN_CMD";
+ case WMI_STOP_SCHED_SCAN_CMDID:
+ return "WMI_STOP_SCHED_SCAN_CMD";
+ case WMI_TX_STATUS_RING_ADD_CMDID:
+ return "WMI_TX_STATUS_RING_ADD_CMD";
+ case WMI_RX_STATUS_RING_ADD_CMDID:
+ return "WMI_RX_STATUS_RING_ADD_CMD";
+ case WMI_TX_DESC_RING_ADD_CMDID:
+ return "WMI_TX_DESC_RING_ADD_CMD";
+ case WMI_RX_DESC_RING_ADD_CMDID:
+ return "WMI_RX_DESC_RING_ADD_CMD";
+ case WMI_BCAST_DESC_RING_ADD_CMDID:
+ return "WMI_BCAST_DESC_RING_ADD_CMD";
+ case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
+ case WMI_LINK_STATS_CMDID:
+ return "WMI_LINK_STATS_CMD";
+ case WMI_SW_TX_REQ_EXT_CMDID:
+ return "WMI_SW_TX_REQ_EXT_CMDID";
+ case WMI_FT_AUTH_CMDID:
+ return "WMI_FT_AUTH_CMD";
+ case WMI_FT_REASSOC_CMDID:
+ return "WMI_FT_REASSOC_CMD";
+ case WMI_UPDATE_FT_IES_CMDID:
+ return "WMI_UPDATE_FT_IES_CMD";
+ case WMI_RBUFCAP_CFG_CMDID:
+ return "WMI_RBUFCAP_CFG_CMD";
+ case WMI_TEMP_SENSE_ALL_CMDID:
+ return "WMI_TEMP_SENSE_ALL_CMDID";
+ case WMI_SET_LINK_MONITOR_CMDID:
+ return "WMI_SET_LINK_MONITOR_CMD";
+ default:
+ return "Untracked CMD";
+ }
+}
+
+static const char *eventid2name(u16 eventid)
+{
+ switch (eventid) {
+ case WMI_NOTIFY_REQ_DONE_EVENTID:
+ return "WMI_NOTIFY_REQ_DONE_EVENT";
+ case WMI_DISCONNECT_EVENTID:
+ return "WMI_DISCONNECT_EVENT";
+ case WMI_SW_TX_COMPLETE_EVENTID:
+ return "WMI_SW_TX_COMPLETE_EVENT";
+ case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+ return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+ case WMI_FW_READY_EVENTID:
+ return "WMI_FW_READY_EVENT";
+ case WMI_TRAFFIC_RESUME_EVENTID:
+ return "WMI_TRAFFIC_RESUME_EVENT";
+ case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+ case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+ case WMI_VRING_CFG_DONE_EVENTID:
+ return "WMI_VRING_CFG_DONE_EVENT";
+ case WMI_READY_EVENTID:
+ return "WMI_READY_EVENT";
+ case WMI_RX_MGMT_PACKET_EVENTID:
+ return "WMI_RX_MGMT_PACKET_EVENT";
+ case WMI_TX_MGMT_PACKET_EVENTID:
+ return "WMI_TX_MGMT_PACKET_EVENT";
+ case WMI_SCAN_COMPLETE_EVENTID:
+ return "WMI_SCAN_COMPLETE_EVENT";
+ case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+ return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+ case WMI_CONNECT_EVENTID:
+ return "WMI_CONNECT_EVENT";
+ case WMI_EAPOL_RX_EVENTID:
+ return "WMI_EAPOL_RX_EVENT";
+ case WMI_BA_STATUS_EVENTID:
+ return "WMI_BA_STATUS_EVENT";
+ case WMI_RCP_ADDBA_REQ_EVENTID:
+ return "WMI_RCP_ADDBA_REQ_EVENT";
+ case WMI_DELBA_EVENTID:
+ return "WMI_DELBA_EVENT";
+ case WMI_RING_EN_EVENTID:
+ return "WMI_RING_EN_EVENT";
+ case WMI_DATA_PORT_OPEN_EVENTID:
+ return "WMI_DATA_PORT_OPEN_EVENT";
+ case WMI_AOA_MEAS_EVENTID:
+ return "WMI_AOA_MEAS_EVENT";
+ case WMI_TOF_SESSION_END_EVENTID:
+ return "WMI_TOF_SESSION_END_EVENT";
+ case WMI_TOF_GET_CAPABILITIES_EVENTID:
+ return "WMI_TOF_GET_CAPABILITIES_EVENT";
+ case WMI_TOF_SET_LCR_EVENTID:
+ return "WMI_TOF_SET_LCR_EVENT";
+ case WMI_TOF_SET_LCI_EVENTID:
+ return "WMI_TOF_SET_LCI_EVENT";
+ case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+ return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+ case WMI_TOF_CHANNEL_INFO_EVENTID:
+ return "WMI_TOF_CHANNEL_INFO_EVENT";
+ case WMI_TRAFFIC_SUSPEND_EVENTID:
+ return "WMI_TRAFFIC_SUSPEND_EVENT";
+ case WMI_ECHO_RSP_EVENTID:
+ return "WMI_ECHO_RSP_EVENT";
+ case WMI_LED_CFG_DONE_EVENTID:
+ return "WMI_LED_CFG_DONE_EVENT";
+ case WMI_PCP_STARTED_EVENTID:
+ return "WMI_PCP_STARTED_EVENT";
+ case WMI_PCP_STOPPED_EVENTID:
+ return "WMI_PCP_STOPPED_EVENT";
+ case WMI_GET_SSID_EVENTID:
+ return "WMI_GET_SSID_EVENT";
+ case WMI_GET_PCP_CHANNEL_EVENTID:
+ return "WMI_GET_PCP_CHANNEL_EVENT";
+ case WMI_P2P_CFG_DONE_EVENTID:
+ return "WMI_P2P_CFG_DONE_EVENT";
+ case WMI_PORT_ALLOCATED_EVENTID:
+ return "WMI_PORT_ALLOCATED_EVENT";
+ case WMI_PORT_DELETED_EVENTID:
+ return "WMI_PORT_DELETED_EVENT";
+ case WMI_LISTEN_STARTED_EVENTID:
+ return "WMI_LISTEN_STARTED_EVENT";
+ case WMI_SEARCH_STARTED_EVENTID:
+ return "WMI_SEARCH_STARTED_EVENT";
+ case WMI_DISCOVERY_STOPPED_EVENTID:
+ return "WMI_DISCOVERY_STOPPED_EVENT";
+ case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+ return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+ case WMI_TEMP_SENSE_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_DONE_EVENT";
+ case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+ return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+ case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+ return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+ case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+ case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+ case WMI_START_SCHED_SCAN_EVENTID:
+ return "WMI_START_SCHED_SCAN_EVENT";
+ case WMI_STOP_SCHED_SCAN_EVENTID:
+ return "WMI_STOP_SCHED_SCAN_EVENT";
+ case WMI_SCHED_SCAN_RESULT_EVENTID:
+ return "WMI_SCHED_SCAN_RESULT_EVENT";
+ case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
+ case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
+ return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
+ case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
+ return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
+ case WMI_LINK_STATS_CONFIG_DONE_EVENTID:
+ return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
+ case WMI_LINK_STATS_EVENTID:
+ return "WMI_LINK_STATS_EVENT";
+ case WMI_COMMAND_NOT_SUPPORTED_EVENTID:
+ return "WMI_COMMAND_NOT_SUPPORTED_EVENT";
+ case WMI_FT_AUTH_STATUS_EVENTID:
+ return "WMI_FT_AUTH_STATUS_EVENT";
+ case WMI_FT_REASSOC_STATUS_EVENTID:
+ return "WMI_FT_REASSOC_STATUS_EVENT";
+ case WMI_RBUFCAP_CFG_EVENTID:
+ return "WMI_RBUFCAP_CFG_EVENT";
+ case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
+ case WMI_SET_LINK_MONITOR_EVENTID:
+ return "WMI_SET_LINK_MONITOR_EVENT";
+ case WMI_LINK_MONITOR_EVENTID:
+ return "WMI_LINK_MONITOR_EVENT";
+ default:
+ return "Untracked EVENT";
+ }
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid,
+ void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wmi_cmd_hdr wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .mid = mid,
+ .command_id = cpu_to_le16(cmdid),
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+ int rc = 0;
+
+ if (len > r->entry_size - sizeof(cmd)) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
+ return -EAGAIN;
+ }
+
+	/* Allow sending only suspend / resume commands during suspend flow */
+ if ((test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resuming, wil->status)) &&
+ ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
+ (cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
+ wil_err(wil, "WMI: reject send_command during suspend\n");
+ return -EINVAL;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+
+ wil_halp_vote(wil);
+
+ /* read Tx head till it is not busy */
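+	/* sync == 0 means the descriptor was consumed by FW and is free */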
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ rc = -EBUSY;
+ goto out;
+ }
+	/* compute the next head, wrapping around the ring */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+	/* wait till the FW finishes with the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ if (!test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ r->tail = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ rc = -EAGAIN;
+ goto out;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_wmi(wil, "sending %s (0x%04x) [%d] mid %d\n",
+ cmdid2name(cmdid), cmdid, len, mid);
+ wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
+ /* advance next ptr */
+ wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+ r->head = next_head);
+
+ trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
+
+ /* interrupt to FW */
+ wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+ SW_INT_MBOX);
+
+out:
+ wil_halp_unvote(wil);
+ return rc;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, mid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct wmi_ready_event *evt = d;
+ u8 fw_max_assoc_sta;
+
+ wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
+ wil->fw_version, le32_to_cpu(evt->sw_version),
+ evt->mac, evt->numof_additional_mids);
+ if (evt->numof_additional_mids + 1 < wil->max_vifs) {
+ wil_err(wil, "FW does not support enough MIDs (need %d)",
+ wil->max_vifs - 1);
+ return; /* FW load will fail after timeout */
+ }
+ /* ignore MAC address, we already have it from the boot loader */
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+ if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) {
+ wil_dbg_wmi(wil, "rfc calibration result %d\n",
+ evt->rfc_read_calib_result);
+ wil->fw_calib_result = evt->rfc_read_calib_result;
+ }
+
+ fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID;
+ if (len > offsetof(struct wmi_ready_event, max_assoc_sta) &&
+ evt->max_assoc_sta > 0) {
+ fw_max_assoc_sta = evt->max_assoc_sta;
+ wil_dbg_wmi(wil, "fw reported max assoc sta %d\n",
+ fw_max_assoc_sta);
+
+ if (fw_max_assoc_sta > WIL6210_MAX_CID) {
+ wil_dbg_wmi(wil,
+ "fw max assoc sta %d exceeds max driver supported %d\n",
+ fw_max_assoc_sta, WIL6210_MAX_CID);
+ fw_max_assoc_sta = WIL6210_MAX_CID;
+ }
+ }
+
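+	/* effective limit is the lower of the module param and the FW limit */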
+ wil->max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
+ wil_dbg_wmi(wil, "setting max assoc sta to %d\n", wil->max_assoc_sta);
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ set_bit(wil_status_fwready, wil->status);
+ /* let the reset sequence continue */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ u16 d_status;
+
+ if (flen < 0) {
+ wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "MGMT Rx: length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
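+	/* RSSI is in dBm; scale to mBm (100 * dBm) for cfg80211, else use SQI % */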
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ signal = 100 * data->info.rssi;
+ else
+ signal = data->info.sqi;
+ d_status = le16_to_cpu(data->info.status);
+ fc = rx_mgmt_frame->frame_control;
+
+ wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %s RSSI %d SQI %d%%\n",
+ data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
+ data->info.rssi, data->info.sqi);
+ wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ le16_to_cpu(fc));
+ wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ struct cfg80211_inform_bss bss_data = {
+ .chan = channel,
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .signal = signal,
+ .boottime_ns = ktime_to_ns(ktime_get_boottime()),
+ };
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+ wil_dbg_wmi(wil, "TSF : 0x%016llx\n", tsf);
+ wil_dbg_wmi(wil, "Beacon interval : %d\n", bi);
+ wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
+ ie_len, true);
+
+ bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data,
+ rx_mgmt_frame,
+ d_len, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+ } else {
+ mutex_lock(&wil->vif_mutex);
+ cfg80211_rx_mgmt(vif_to_radio_wdev(wil, vif), freq, signal,
+ (void *)rx_mgmt_frame, d_len, 0);
+ mutex_unlock(&wil->vif_mutex);
+ }
+}
+
+static void wmi_evt_tx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wmi_tx_mgmt_packet_event *data = d;
+ struct ieee80211_mgmt *mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+ wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+ flen, true);
+}
+
+static void wmi_evt_scan_complete(struct wil6210_vif *vif, int id,
+ void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ mutex_lock(&wil->vif_mutex);
+ if (vif->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ int status = le32_to_cpu(data->status);
+ struct cfg80211_scan_info info = {
+ .aborted = ((status != WMI_SCAN_SUCCESS) &&
+ (status != WMI_SCAN_ABORT_REJECTED)),
+ };
+
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status);
+ wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
+ vif->scan_request, info.aborted);
+ del_timer_sync(&vif->scan_timer);
+ cfg80211_scan_done(vif->scan_request, &info);
+ if (vif->mid == 0)
+ wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
+ vif->scan_request = NULL;
+ wake_up_interruptible(&wil->wq);
+ if (vif->p2p.pending_listen_wdev) {
+ wil_dbg_misc(wil, "Scheduling delayed listen\n");
+ schedule_work(&vif->p2p.delayed_listen_work);
+ }
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+ mutex_unlock(&wil->vif_mutex);
+}
+
+static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info *sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+ int rc;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ if (evt->cid >= wil->max_assoc_sta) {
+ wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
+ return;
+ }
+
+ ch = evt->channel + 1;
+ wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
+ evt->bssid, ch, evt->cid, evt->aid);
+ wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out IE's */
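+	/* assoc_info layout: beacon IEs, then assoc req, then assoc resp */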
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
+ evt->cid);
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
+ mutex_lock(&wil->mutex);
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (!test_bit(wil_vif_fwconnecting, vif->status)) {
+ wil_err(wil, "Not in connecting state\n");
+ mutex_unlock(&wil->mutex);
+ return;
+ }
+ del_timer_sync(&vif->connect_timer);
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ if (wil->sta[evt->cid].status != wil_sta_unused) {
+ wil_err(wil, "AP: Invalid status %d for CID %d\n",
+ wil->sta[evt->cid].status, evt->cid);
+ mutex_unlock(&wil->mutex);
+ return;
+ }
+ }
+
+ ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
+ wil->sta[evt->cid].mid = vif->mid;
+ wil->sta[evt->cid].status = wil_sta_conn_pending;
+
+ rc = wil_ring_init_tx(vif, evt->cid);
+ if (rc) {
+ wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
+ evt->cid, rc);
+ wmi_disconnect_sta(vif, wil->sta[evt->cid].addr,
+ WLAN_REASON_UNSPECIFIED, false);
+ } else {
+ wil_info(wil, "successful connection to CID %d\n", evt->cid);
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (rc) {
+ netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
+ wil_err(wil, "cfg80211_connect_result with failure\n");
+ cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ goto out;
+ } else {
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ cfg80211_ref_bss(wiphy, vif->bss);
+ cfg80211_connect_bss(ndev, evt->bssid, vif->bss,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
+ }
+ vif->bss = NULL;
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+
+ if (rc) {
+ if (disable_ap_sme)
+ /* notify new_sta has failed */
+ cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
+ goto out;
+ }
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sinfo->generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo->assoc_req_ies = assoc_req_ie;
+ sinfo->assoc_req_ies_len = assoc_req_ielen;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL);
+
+ kfree(sinfo);
+ } else {
+ wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
+ evt->cid);
+ goto out;
+ }
+
+ wil->sta[evt->cid].status = wil_sta_connected;
+ wil->sta[evt->cid].aid = evt->aid;
+ if (!test_and_set_bit(wil_vif_fwconnected, vif->status))
+ atomic_inc(&wil->connected_vifs);
+ wil_update_net_queues_bh(wil, vif, NULL, false);
+
+out:
+ if (rc) {
+ wil->sta[evt->cid].status = wil_sta_unused;
+ wil->sta[evt->cid].mid = U8_MAX;
+ }
+ clear_bit(wil_vif_fwconnecting, vif->status);
+ mutex_unlock(&wil->mutex);
+}
+
+static void wmi_evt_disconnect(struct wil6210_vif *vif, int id,
+ void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_disconnect_event *evt = d;
+ u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
+
+ wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ evt->bssid, reason_code, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "status_resetting, cancel disconnect event\n");
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect_complete(vif, evt->bssid, reason_code);
+ if (disable_ap_sme) {
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+
+ /* disconnect event in disable_ap_sme mode means link loss */
+ switch (wdev->iftype) {
+ /* AP-like interface */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* notify hostapd about link loss */
+ cfg80211_cqm_pktloss_notify(ndev, evt->bssid, 0,
+ GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+ }
+ mutex_unlock(&wil->mutex);
+}
+
+/*
+ * Firmware reports an EAPOL frame using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via normal Rx.
+ */
+static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ int cid;
+ struct wil_net_stats *stats = NULL;
+
+ wil_dbg_wmi(wil, "EAPOL len %d from %pM MID %d\n", eapol_len,
+ evt->src_mac, vif->mid);
+
+ cid = wil_find_cid(wil, vif->mid, evt->src_mac);
+ if (cid >= 0)
+ stats = &wil->sta[cid].stats;
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
+
+ eth = skb_put(skb, ETH_HLEN);
+ ether_addr_copy(eth->h_dest, ndev->dev_addr);
+ ether_addr_copy(eth->h_source, evt->src_mac);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ skb_put_data(skb, evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += sz;
+ if (stats) {
+ stats->rx_packets++;
+ stats->rx_bytes += sz;
+ }
+ } else {
+ ndev->stats.rx_dropped++;
+ if (stats)
+ stats->rx_dropped++;
+ }
+}
+
+static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_ring_en_event *evt = d;
+ u8 vri = evt->ring_index;
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wil_sta_info *sta;
+ u8 cid;
+ struct key_params params;
+
+ wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
+
+ if (vri >= ARRAY_SIZE(wil->ring_tx)) {
+ wil_err(wil, "Enable for invalid vring %d\n", vri);
+ return;
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme ||
+ test_bit(wil_vif_ft_roam, vif->status))
+		/* in AP mode with disable_ap_sme and no FT roam,
+		 * this is done by wil_cfg80211_change_station()
+		 */
+ wil->ring_tx_data[vri].dot1x_open = true;
+ if (vri == vif->bcast_ring) /* no BA for bcast */
+ return;
+
+ cid = wil->ring2cid_tid[vri][0];
+ if (!wil_cid_valid(wil, cid)) {
+ wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
+ return;
+ }
+
+	/* In FT mode we get the key but do not store it, as it is received
+	 * before the WMI_CONNECT_EVENT from the FW.
+	 * wil_set_crypto_rx is called here to reset the security PN
+	 */
+ sta = &wil->sta[cid];
+ if (test_bit(wil_vif_ft_roam, vif->status)) {
+ memset(&params, 0, sizeof(params));
+ wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, &params);
+ if (wdev->iftype != NL80211_IFTYPE_AP)
+ clear_bit(wil_vif_ft_roam, vif->status);
+ }
+
+ if (agg_wsize >= 0)
+ wil_addba_tx_request(wil, vri, agg_wsize);
+}
+
+static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
+ void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_ba_status_event *evt = d;
+ struct wil_ring_tx_data *txdata;
+
+ wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
+ evt->ringid,
+ evt->status == WMI_BA_AGREED ? "OK" : "N/A",
+ evt->agg_wsize, __le16_to_cpu(evt->ba_timeout),
+ evt->amsdu ? "+" : "-");
+
+ if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
+ wil_err(wil, "invalid ring id %d\n", evt->ringid);
+ return;
+ }
+
+ if (evt->status != WMI_BA_AGREED) {
+ evt->ba_timeout = 0;
+ evt->agg_wsize = 0;
+ evt->amsdu = 0;
+ }
+
+ txdata = &wil->ring_tx_data[evt->ringid];
+
+ txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
+ txdata->agg_wsize = evt->agg_wsize;
+ txdata->agg_amsdu = evt->amsdu;
+ txdata->addba_in_progress = false;
+}
+
+static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id,
+ void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid, tid;
+ struct wmi_rcp_addba_req_event *evt = d;
+
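+	/* CIDXTID_EXTENDED_CID_TID means cid and tid come in separate fields */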
+ if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ } else {
+ cid = evt->cid;
+ tid = evt->tid;
+ }
+ wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token,
+ evt->ba_param_set, evt->ba_timeout,
+ evt->ba_seq_ctrl);
+}
+
+static void wmi_evt_delba(struct wil6210_vif *vif, int id, void *d, int len)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_delba_event *evt = d;
+ u8 cid, tid;
+ u16 reason = __le16_to_cpu(evt->reason);
+ struct wil_sta_info *sta;
+ struct wil_tid_ampdu_rx *r;
+
+ might_sleep();
+
+ if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ } else {
+ cid = evt->cid;
+ tid = evt->tid;
+ }
+
+ if (!wil_cid_valid(wil, cid)) {
+ wil_err(wil, "DELBA: Invalid CID %d\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n",
+ vif->mid, cid, tid,
+ evt->from_initiator ? "originator" : "recipient",
+ reason);
+ if (!evt->from_initiator) {
+ int i;
+ /* find Tx vring it belongs to */
+ for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
+ if (wil->ring2cid_tid[i][0] == cid &&
+ wil->ring2cid_tid[i][1] == tid) {
+ struct wil_ring_tx_data *txdata =
+ &wil->ring_tx_data[i];
+
+ wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
+ txdata->agg_timeout = 0;
+ txdata->agg_wsize = 0;
+ txdata->addba_in_progress = false;
+
+ break; /* max. 1 matching ring */
+ }
+ }
+ if (i >= ARRAY_SIZE(wil->ring2cid_tid))
+ wil_err(wil, "DELBA: unable to find Tx vring\n");
+ return;
+ }
+
+ sta = &wil->sta[cid];
+
+ spin_lock_bh(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ sta->tid_rx[tid] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+
+ spin_unlock_bh(&sta->tid_rx_lock);
+}
+
+static void
+wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_sched_scan_result_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ struct cfg80211_bss *bss;
+ struct cfg80211_inform_bss bss_data = {
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .boottime_ns = ktime_to_ns(ktime_get_boottime()),
+ };
+
+ if (flen < 0) {
+ wil_err(wil, "sched scan result event too short, len %d\n",
+ len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "sched scan result length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ fc = rx_mgmt_frame->frame_control;
+ if (!ieee80211_is_probe_resp(fc)) {
+		wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+			le16_to_cpu(fc));
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ signal = 100 * data->info.rssi;
+ else
+ signal = data->info.sqi;
+
+ wil_dbg_wmi(wil, "sched scan result: channel %d MCS %s RSSI %d\n",
+ data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
+ data->info.rssi);
+ wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+ d_len, data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ bss_data.signal = signal;
+ bss_data.chan = channel;
+ bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame,
+ d_len, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+
+ cfg80211_sched_scan_results(wiphy, 0);
+}
+
+static void wil_link_stats_store_basic(struct wil6210_vif *vif,
+ struct wmi_link_stats_basic *basic)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid = basic->cid;
+ struct wil_sta_info *sta;
+
+ if (cid >= wil->max_assoc_sta) {
+ wil_err(wil, "invalid cid %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ sta->fw_stats_basic = *basic;
+}
+
+static void wil_link_stats_store_global(struct wil6210_vif *vif,
+ struct wmi_link_stats_global *global)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil->fw_stats_global.stats = *global;
+}
+
+static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
+ bool has_next, void *payload,
+ size_t payload_size)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ size_t hdr_size = sizeof(struct wmi_link_stats_record);
+ size_t stats_size, record_size, expected_size;
+ struct wmi_link_stats_record *hdr;
+
+ if (payload_size < hdr_size) {
+ wil_err(wil, "link stats wrong event size %zu\n", payload_size);
+ return;
+ }
+
+ while (payload_size >= hdr_size) {
+ hdr = payload;
+ stats_size = le16_to_cpu(hdr->record_size);
+ record_size = hdr_size + stats_size;
+
+ if (payload_size < record_size) {
+ wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n",
+ payload_size, record_size);
+ return;
+ }
+
+ switch (hdr->record_type_id) {
+ case WMI_LINK_STATS_TYPE_BASIC:
+ expected_size = sizeof(struct wmi_link_stats_basic);
+ if (stats_size < expected_size) {
+ wil_err(wil, "link stats invalid basic record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+ if (vif->fw_stats_ready) {
+ /* clean old statistics */
+ vif->fw_stats_tsf = 0;
+ vif->fw_stats_ready = false;
+ }
+
+ wil_link_stats_store_basic(vif, payload + hdr_size);
+
+ if (!has_next) {
+ vif->fw_stats_tsf = tsf;
+ vif->fw_stats_ready = true;
+ }
+
+ break;
+ case WMI_LINK_STATS_TYPE_GLOBAL:
+ expected_size = sizeof(struct wmi_link_stats_global);
+ if (stats_size < sizeof(struct wmi_link_stats_global)) {
+ wil_err(wil, "link stats invalid global record size %zu < %zu\n",
+ stats_size, expected_size);
+ return;
+ }
+
+ if (wil->fw_stats_global.ready) {
+ /* clean old statistics */
+ wil->fw_stats_global.tsf = 0;
+ wil->fw_stats_global.ready = false;
+ }
+
+ wil_link_stats_store_global(vif, payload + hdr_size);
+
+ if (!has_next) {
+ wil->fw_stats_global.tsf = tsf;
+ wil->fw_stats_global.ready = true;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ /* skip to next record */
+ payload += record_size;
+ payload_size -= record_size;
+ }
+}
+
+static void
+wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_event *evt = d;
+ size_t payload_size;
+
+ if (len < offsetof(struct wmi_link_stats_event, payload)) {
+ wil_err(wil, "stats event way too short %d\n", len);
+ return;
+ }
+ payload_size = le16_to_cpu(evt->payload_size);
+ if (len < sizeof(struct wmi_link_stats_event) + payload_size) {
+ wil_err(wil, "stats event too short %d\n", len);
+ return;
+ }
+
+ wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next,
+ evt->payload, payload_size);
+}
+
+/* find cid and ringid for the station vif
+ *
+ * return an error if the interface is not a station/P2P client
+ * or the ring was not found
+ */
+static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ int *cid,
+ int *ringid)
+{
+ struct wil_ring *ring;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+ int i;
+ u8 lcid;
+
+ if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION ||
+ vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype);
+ return -EINVAL;
+ }
+
+	/* In STA mode, only one Tx ring is expected: the one for the AP
+	 * we are connected to.
+	 * Find it and return the cid associated with it.
+	 */
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
+ continue;
+
+ lcid = wil->ring2cid_tid[i][0];
+ if (lcid >= wil->max_assoc_sta) /* skip BCAST */
+ continue;
+
+ wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
+ *cid = lcid;
+ *ringid = i;
+ return 0;
+ }
+
+ wil_dbg_wmi(wil, "find sta cid while no rings active?\n");
+
+ return -ENOENT;
+}
+
+static void
+wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_ft_auth_status_event *data = d;
+ int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info);
+ int rc, cid = 0, ringid = 0;
+ struct cfg80211_ft_event_params ft;
+ u16 d_len;
+ /* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */
+ const size_t auth_ie_offset = sizeof(u16) * 3;
+ struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info;
+
+ /* check the status */
+ if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "FT: auth failed. status %d\n", data->status);
+ goto fail;
+ }
+
+ if (ie_len < auth_ie_offset) {
+ wil_err(wil, "FT: auth event too short, len %d\n", len);
+ goto fail;
+ }
+
+ d_len = le16_to_cpu(data->ie_len);
+ if (d_len != ie_len) {
+ wil_err(wil,
+ "FT: auth ie length mismatch, d_len %d should be %d\n",
+ d_len, ie_len);
+ goto fail;
+ }
+
+ if (!test_bit(wil_vif_ft_roam, wil->status)) {
+ wil_err(wil, "FT: Not in roaming state\n");
+ goto fail;
+ }
+
+ if (le16_to_cpu(auth->auth_transaction) != 2) {
+ wil_err(wil, "FT: auth error. auth_transaction %d\n",
+ le16_to_cpu(auth->auth_transaction));
+ goto fail;
+ }
+
+ if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) {
+ wil_err(wil, "FT: auth error. auth_alg %d\n",
+ le16_to_cpu(auth->auth_alg));
+ goto fail;
+ }
+
+ wil_dbg_wmi(wil, "FT: Auth to %pM successfully\n", data->mac_addr);
+ wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1,
+ data->ie_info, d_len, true);
+
+ /* find cid and ringid */
+ rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+ if (rc) {
+ wil_err(wil, "No valid cid found\n");
+ goto fail;
+ }
+
+ if (vif->privacy) {
+ /* For secure assoc, remove old keys */
+ rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+ WMI_KEY_USE_PAIRWISE);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+ goto fail;
+ }
+ rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+ WMI_KEY_USE_RX_GROUP);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
+ goto fail;
+ }
+ }
+
+ memset(&ft, 0, sizeof(ft));
+ ft.ies = data->ie_info + auth_ie_offset;
+ ft.ies_len = d_len - auth_ie_offset;
+ ft.target_ap = data->mac_addr;
+ cfg80211_ft_event(ndev, &ft);
+
+ return;
+
+fail:
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
+}
+
+static void
+wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct wmi_ft_reassoc_status_event *data = d;
+ int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event,
+ ie_info);
+ int rc = -ENOENT, cid = 0, ringid = 0;
+ int ch; /* channel number (primary) */
+ size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0;
+ u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL;
+ /* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+ u16 d_len;
+ int freq;
+ struct cfg80211_roam_info info;
+
+ if (ies_len < 0) {
+ wil_err(wil, "ft reassoc event too short, len %d\n", len);
+ goto fail;
+ }
+
+	wil_dbg_wmi(wil, "Reassoc Status event: status=%d, aid=%d",
+ data->status, data->aid);
+ wil_dbg_wmi(wil, " mac_addr=%pM, beacon_ie_len=%d",
+ data->mac_addr, data->beacon_ie_len);
+ wil_dbg_wmi(wil, " reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d",
+ le16_to_cpu(data->reassoc_req_ie_len),
+ le16_to_cpu(data->reassoc_resp_ie_len));
+
+ d_len = le16_to_cpu(data->beacon_ie_len) +
+ le16_to_cpu(data->reassoc_req_ie_len) +
+ le16_to_cpu(data->reassoc_resp_ie_len);
+ if (d_len != ies_len) {
+ wil_err(wil,
+ "ft reassoc ie length mismatch, d_len %d should be %d\n",
+ d_len, ies_len);
+ goto fail;
+ }
+
+ /* check the status */
+ if (data->status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "ft reassoc failed. status %d\n", data->status);
+ goto fail;
+ }
+
+ /* find cid and ringid */
+ rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+ if (rc) {
+ wil_err(wil, "No valid cid found\n");
+ goto fail;
+ }
+
+ ch = data->channel + 1;
+ wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n",
+ data->mac_addr, ch, cid, data->aid);
+
+ wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ data->ie_info, len - sizeof(*data), true);
+
+ /* figure out IE's */
+ if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) {
+ assoc_req_ie = &data->ie_info[assoc_req_ie_offset];
+ assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) -
+ assoc_req_ie_offset;
+ }
+ if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) {
+ wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n",
+ le16_to_cpu(data->reassoc_resp_ie_len));
+ goto fail;
+ }
+
+ assoc_resp_ie = &data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) +
+ assoc_resp_ie_offset];
+ assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) -
+ assoc_resp_ie_offset;
+
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "FT: status_resetting, cancel reassoc event\n");
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
+ mutex_lock(&wil->mutex);
+
+ /* ring modify to set the ring for the roamed AP settings */
+ wil_dbg_wmi(wil,
+ "ft modify tx config for connection CID %d ring %d\n",
+ cid, ringid);
+
+ rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0);
+ if (rc) {
+ wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n",
+ cid, vif->mid, ringid, rc);
+ mutex_unlock(&wil->mutex);
+ goto fail;
+ }
+
+ /* Update the driver STA members with the new bss */
+ wil->sta[cid].aid = data->aid;
+ wil->sta[cid].stats.ft_roams++;
+ ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid);
+ mutex_unlock(&wil->mutex);
+ del_timer_sync(&vif->connect_timer);
+
+ cfg80211_ref_bss(wiphy, vif->bss);
+ freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
+
+ memset(&info, 0, sizeof(info));
+ info.links[0].channel = ieee80211_get_channel(wiphy, freq);
+ info.links[0].bss = vif->bss;
+ info.req_ie = assoc_req_ie;
+ info.req_ie_len = assoc_req_ie_len;
+ info.resp_ie = assoc_resp_ie;
+ info.resp_ie_len = assoc_resp_ie_len;
+ cfg80211_roamed(ndev, &info, GFP_KERNEL);
+ vif->bss = NULL;
+
+ return;
+
+fail:
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
+}
+
+static void
+wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_link_monitor_event *evt = d;
+ enum nl80211_cqm_rssi_threshold_event event_type;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "link monitor event too short %d\n", len);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
+ evt->type, evt->rssi_level, wil->cqm_rssi_thold);
+
+ if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
+ /* ignore */
+ return;
+
+ event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
+ cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
+}
+
+/* Some events are ignored on purpose and need not be interpreted as
+ * "unhandled events"
+ */
+static void wmi_evt_ignore(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_wmi(wil, "Ignore event 0x%04x len %d\n", id, len);
+}
+
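+/* Dispatch table mapping WMI event IDs to their handlers. Events not
+ * listed here are reported by wmi_event_handle() as unhandled.
+ */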
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_vif *vif,
+ int eventid, void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_ignore},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+ {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
+ {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
+ {WMI_DELBA_EVENTID, wmi_evt_delba},
+ {WMI_RING_EN_EVENTID, wmi_evt_ring_en},
+ {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
+ {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
+ {WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
+ {WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
+ {WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
+ {WMI_LINK_MONITOR_EVENTID, wmi_evt_link_monitor},
+};
+
+/*
+ * Run in IRQ context
+ * Extract WMI events from the mailbox. Queue them to @wil->pending_wmi_ev,
+ * which is eventually handled by @wmi_event_worker in the context of the
+ * "wil6210_wmi" thread
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+ unsigned n;
+ unsigned int num_immed_reply = 0;
+
+ if (!test_bit(wil_status_mbox_ready, wil->status)) {
+ wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
+ return;
+ }
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil, "suspended. cannot handle WMI event\n");
+ return;
+ }
+
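+	/* Drain the RX mailbox ring: an event matching a pending wmi_call()
+	 * is completed immediately, anything else is queued for
+	 * wmi_event_worker
+	 */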
+ for (n = 0;; n++) {
+ u16 len;
+ bool q;
+ bool immed_reply = false;
+
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ break;
+
+ wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
+ r->head, r->tail);
+ /* read cmd descriptor from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ break;
+ }
+
+ /* read cmd header from descriptor */
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ break;
+ }
+ len = le16_to_cpu(hdr.len);
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+
+ /* read cmd buffer from descriptor */
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt)
+ break;
+
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ wil_w(wil, r->tail +
+ offsetof(struct wil6210_mbox_ring_desc, sync), 0);
+		/* if this is a WMI event, check for a pending wmi_call() reply */
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->command_id);
+ u8 mid = wmi->mid;
+ u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
+ if (test_bit(wil_status_resuming, wil->status)) {
+ if (id == WMI_TRAFFIC_RESUME_EVENTID)
+ clear_bit(wil_status_resuming,
+ wil->status);
+ else
+ wil_err(wil,
+ "WMI evt %d while resuming\n",
+ id);
+ }
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ if (wil->reply_id && wil->reply_id == id &&
+ wil->reply_mid == mid) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ immed_reply = true;
+ }
+ if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
+ wil_dbg_wmi(wil,
+ "set suspend_resp_rcvd\n");
+ wil->suspend_resp_rcvd = true;
+ }
+ }
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+ eventid2name(id), id, wmi->mid, tstamp);
+ trace_wil6210_wmi_event(wmi, &wmi[1],
+ len - sizeof(*wmi));
+ }
+ wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ wil_w(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
+
+ if (immed_reply) {
+ wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
+ wil->reply_id);
+ kfree(evt);
+ num_immed_reply++;
+ complete(&wil->wmi_call);
+ } else {
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ }
+ }
+ /* normally, 1 event per IRQ should be processed */
+ wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
+ n - num_immed_reply, num_immed_reply);
+}
+
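+/* Synchronous WMI command: send @cmdid and wait up to @to_msec for the
+ * @reply_id event. The reply_id/reply_buf fields are published under
+ * wmi_ev_lock so the receive path can match the event, optionally copy
+ * it into @reply and complete @wil->wmi_call.
+ */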
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
+ u16 reply_id, void *reply, u16 reply_size, int to_msec)
+{
+ int rc;
+ unsigned long remain;
+ ulong flags;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ wil->reply_id = reply_id;
+ wil->reply_mid = mid;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ reinit_completion(&wil->wmi_call);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ rc = __wmi_send(wil, cmdid, mid, buf, len);
+ if (rc)
+ goto out;
+
+ remain = wait_for_completion_timeout(&wil->wmi_call,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_wmi(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+
+out:
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ wil->reply_id = 0;
+ wil->reply_mid = U8_MAX;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_set_mac_address_cmd cmd;
+
+ ether_addr_copy(cmd.mac, addr);
+
+ wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, vif->mid,
+ &cmd, sizeof(cmd));
+}
+
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc = 0;
+ struct wmi_led_cfg_cmd cmd = {
+ .led_mode = enable,
+ .id = led_id,
+ .slow_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms),
+ .slow_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms),
+ .medium_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms),
+ .medium_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms),
+ .fast_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms),
+ .fast_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms),
+ .led_polarity = led_polarity,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_led_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)},
+ };
+
+ if (led_id == WIL_LED_INVALID_ID)
+ goto out;
+
+ if (led_id > WIL_LED_MAX_ID) {
+ wil_err(wil, "Invalid led id %d\n", led_id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil,
+ "%s led %d\n",
+ enable ? "enabling" : "disabling", led_id);
+
+ rc = wmi_call(wil, WMI_LED_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ goto out;
+
+ if (reply.evt.status) {
+ wil_err(wil, "led %d cfg failed with status %d\n",
+ led_id, le32_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+out:
+ return rc;
+}
+
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+
+ struct wmi_rbufcap_cfg_cmd cmd = {
+ .enable = enable,
+ .rx_desc_threshold = cpu_to_le16(threshold),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rbufcap_cfg_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_RBUFCAP_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_RBUFCAP_CFG_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RBUFCAP_CFG failed. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype,
+ u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+
+ struct wmi_pcp_start_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ .channel = chan - 1,
+ .edmg_channel = wmi_edmg_chan,
+ .pcp_max_assoc_sta = wil->max_assoc_sta,
+ .hidden_ssid = hidden_ssid,
+ .is_go = is_go,
+ .ap_sme_offload_mode = disable_ap_sme ?
+ WMI_AP_SME_OFFLOAD_PARTIAL :
+ WMI_AP_SME_OFFLOAD_FULL,
+ .abft_len = wil->abft_len,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_pcp_started_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ if (!vif->privacy)
+ cmd.disable_sec = 1;
+
+ if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
+ (cmd.pcp_max_assoc_sta <= 0)) {
+ wil_err(wil, "unexpected max_assoc_sta %d\n",
+ cmd.pcp_max_assoc_sta);
+ return -EOPNOTSUPP;
+ }
+
+ if (disable_ap_sme &&
+ !test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL,
+ wil->fw_capabilities)) {
+ wil_err(wil, "disable_ap_sme not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+
+	/*
+	 * Processing time may be long; for a secure AP it takes the FW
+	 * about 3500ms to start the AP
+	 */
+ rc = wmi_call(wil, WMI_PCP_START_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
+ rc = -EINVAL;
+
+ if (wmi_nettype != WMI_NETTYPE_P2P)
+ /* Don't fail due to error in the led configuration */
+ wmi_led_cfg(wil, true);
+
+ return rc;
+}
+
+int wmi_pcp_stop(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+
+ rc = wmi_led_cfg(wil, false);
+ if (rc)
+ return rc;
+
+ return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0,
+ WMI_PCP_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_PCP_STOP_TO_MS);
+}
+
+int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, vif->mid, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ memset(&reply, 0, sizeof(reply));
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0,
+ WMI_GET_SSID_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, vif->mid,
+ &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ memset(&reply, 0, sizeof(reply));
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_p2p_cfg_cmd cmd = {
+ .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
+ .bcon_interval = cpu_to_le16(bi),
+ .channel = channel - 1,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_p2p_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
+
+ rc = wmi_call(wil, WMI_P2P_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_listen(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start listen. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_search(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_search_started_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_SEARCH_CMDID, vif->mid, NULL, 0,
+ WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start search. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_stop_discovery(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+
+ wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
+
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
+
+ if (rc)
+ wil_err(wil, "Failed to stop discovery\n");
+
+ return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_vif *vif, u8 key_index,
+ const void *mac_addr, int key_usage)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, vif->mid,
+ &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = key_usage,
+ .key_len = key_len,
+ };
+
+ if (key_len > sizeof(cmd.key))
+ return -EINVAL;
+
+	/* key_len == 0 is allowed only for WMI_KEY_USE_APPLY_PTK usage */
+ if ((key_len == 0 || !key) &&
+ key_usage != WMI_KEY_USE_APPLY_PTK)
+ return -EINVAL;
+
+ if (key)
+ memcpy(cmd.key, key, key_len);
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, vif->mid,
+ &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ static const char *const names[] = {
+ [WMI_FRAME_BEACON] = "BEACON",
+ [WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
+ [WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP",
+ [WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ",
+ [WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP",
+ };
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd;
+
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!ie)
+ ie_len = 0;
+
+ cmd->mgmt_frm_type = type;
+	/* BUG: FW API defines ieLen as u8. Will fix FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len);
+ kfree(cmd);
+out:
+ if (rc) {
+ const char *name = type < ARRAY_SIZE(names) ?
+ names[type] : "??";
+ wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+ }
+
+ return rc;
+}
+
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u16 len;
+ struct wmi_update_ft_ies_cmd *cmd;
+ int rc;
+
+ if (!ie)
+ ie_len = 0;
+
+ len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len;
+ if (len < ie_len) {
+ wil_err(wil, "wraparound. ie len %d\n", ie_len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cmd->ie_len = cpu_to_le16(ie_len);
+ if (ie_len)
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
+ kfree(cmd);
+
+out:
+ if (rc)
+ wil_err(wil, "update ft ies failed : %d\n", rc);
+
+ return rc;
+}
+
+/**
+ * wmi_rxon - turn radio on/off
+ * @wil: driver data
+ * @on: turn on if true, off otherwise
+ *
+ * Only switches the radio; the channel should be set separately.
+ * There is no timeout for rxon - the radio stays on until some other
+ * call turns it off
+ */
+int wmi_rxon(struct wil6210_priv *wil, bool on)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_info(wil, "(%s)\n", on ? "on" : "off");
+
+ if (on) {
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID,
+ &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
+ rc = -EINVAL;
+ } else {
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ }
+
+ return rc;
+}
+
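+/* Configure the RX DMA chain in FW (non-enhanced-DMA datapath). In
+ * monitor mode the sniffer is enabled instead of checksum offload.
+ */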
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(
+ wil_mtu2macbuf(wil->rx_buf_len)),
+ .ring_mem_base = cpu_to_le64(vring->pa),
+ .ring_size = cpu_to_le16(vring->size),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .reorder_type = WMI_RX_SW_REORDER,
+ .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+ int rc;
+
+ memset(&evt, 0, sizeof(evt));
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
+ } else {
+		/* Initialize offload (in non-sniffer mode).
+		 * The Linux IP stack always calculates the IP checksum;
+		 * HW always calculates the TCP/UDP checksum
+		 */
+ cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
+ }
+
+ if (rx_align_2)
+ cmd.l2_802_3_offload_ctrl |=
+ L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
+
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ return rc;
+
+ if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+ rc = -EINVAL;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ return rc;
+}
+
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_temp_sense_cmd cmd = {
+ .measure_baseband_en = cpu_to_le32(!!t_bb),
+ .measure_rf_en = cpu_to_le32(!!t_rf),
+ .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_temp_sense_done_event evt;
+ } __packed reply;
+
+ memset(&reply, 0, sizeof(reply));
+
+ rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (t_bb)
+ *t_bb = le32_to_cpu(reply.evt.baseband_t1000);
+ if (t_rf)
+ *t_rf = le32_to_cpu(reply.evt.rf_t1000);
+
+ return 0;
+}
+
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+ struct wmi_temp_sense_all_done_event
+ *sense_all_evt)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_temp_sense_all_cmd cmd = {
+ .measure_baseband_en = true,
+ .measure_rf_en = true,
+ .measure_mode = TEMPERATURE_MEASURE_NOW,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_temp_sense_all_done_event evt;
+ } __packed reply;
+
+ if (!sense_all_evt) {
+ wil_err(wil, "Invalid sense_all_evt value\n");
+ return -EINVAL;
+ }
+
+ memset(&reply, 0, sizeof(reply));
+ reply.evt.status = WMI_FW_STATUS_FAILURE;
+ rc = wmi_call(wil, WMI_TEMP_SENSE_ALL_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TEMP_SENSE_ALL_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
+ wil_err(wil, "Failed getting TEMP_SENSE_ALL\n");
+ return -EINVAL;
+ }
+
+ memcpy(sense_all_evt, &reply.evt, sizeof(reply.evt));
+ return 0;
+}
+
+int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
+ bool del_sta)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_disconnect_sta_cmd disc_sta_cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+ struct wmi_del_sta_cmd del_sta_cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
+
+ memset(&reply, 0, sizeof(reply));
+ vif->locally_generated_disc = true;
+ if (del_sta) {
+ ether_addr_copy(del_sta_cmd.dst_mac, mac);
+ rc = wmi_call(wil, WMI_DEL_STA_CMDID, vif->mid, &del_sta_cmd,
+ sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID,
+ &reply, sizeof(reply), 1000);
+ } else {
+ ether_addr_copy(disc_sta_cmd.dst_mac, mac);
+ rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, vif->mid,
+ &disc_sta_cmd, sizeof(disc_sta_cmd),
+ WMI_DISCONNECT_EVENTID,
+ &reply, sizeof(reply), 1000);
+ }
+	/* failure to disconnect in reasonable time is treated as a FW error */
+ if (rc) {
+ wil_fw_error_recovery(wil);
+ return rc;
+ }
+ wil->sinfo_gen++;
+
+ return 0;
+}
+
+int wmi_addba(struct wil6210_priv *wil, u8 mid,
+ u8 ringid, u8 size, u16 timeout)
+{
+ u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
+ test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
+ wil->amsdu_en;
+ struct wmi_ring_ba_en_cmd cmd = {
+ .ring_id = ringid,
+ .agg_max_wsize = size,
+ .ba_timeout = cpu_to_le16(timeout),
+ .amsdu = amsdu,
+ };
+
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
+ ringid, size, timeout, amsdu);
+
+ return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
+{
+ struct wmi_ring_ba_dis_cmd cmd = {
+ .ring_id = ringid,
+ .reason = cpu_to_le16(reason),
+ };
+
+ wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
+
+ return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason)
+{
+ struct wmi_rcp_delba_cmd cmd = {
+ .reason = cpu_to_le16(reason),
+ };
+
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.cid = cid;
+ cmd.tid = tid;
+ } else {
+ cmd.cidxtid = mk_cidxtid(cid, tid);
+ }
+
+ wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid,
+ tid, reason);
+
+ return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd));
+}
+
+int wmi_addba_rx_resp(struct wil6210_priv *wil,
+ u8 mid, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_cmd cmd = {
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (controlled by FW)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+ };
+
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.cid = cid;
+ cmd.tid = tid;
+ } else {
+ cmd.cidxtid = mk_cidxtid(cid, tid);
+ }
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
+ mid, cid, tid, agg_wsize,
+ timeout, status, amsdu ? "+" : "-");
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, mid, &cmd, sizeof(cmd),
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 token, u16 status, bool amsdu, u16 agg_wsize,
+ u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_edma_cmd cmd = {
+ .cid = cid,
+ .tid = tid,
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (controlled by FW)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ /* route all the connections to status ring 0 */
+ .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
+ };
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
+ WIL_DEFAULT_RX_STATUS_RING_ID);
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
+ sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_ps_dev_profile_cfg_cmd cmd = {
+ .ps_profile = ps_profile,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_ps_dev_profile_cfg_event evt;
+ } __packed reply = {
+ .evt = {.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR)},
+ };
+ u32 status;
+
+ wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile);
+
+ rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ status = le32_to_cpu(reply.evt.status);
+
+ if (status != WMI_PS_CFG_CMD_STATUS_SUCCESS) {
+ wil_err(wil, "ps dev profile cfg failed with status %d\n",
+ status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_set_mgmt_retry_limit_cmd cmd = {
+ .mgmt_retry_limit = retry_short,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_set_mgmt_retry_limit_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short);
+
+ if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "set mgmt retry limit failed with status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_get_mgmt_retry_limit_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "getting mgmt retry short\n");
+
+ if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0,
+ WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (retry_short)
+ *retry_short = reply.evt.mgmt_retry_limit;
+
+ return 0;
+}
+
+int wmi_abort_scan(struct wil6210_vif *vif)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+
+ wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n");
+
+ rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, vif->mid, NULL, 0);
+ if (rc)
+ wil_err(wil, "Failed to abort scan (%d)\n", rc);
+
+ return rc;
+}
+
+int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_new_sta_cmd cmd = {
+ .aid = aid,
+ };
+
+ wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid);
+
+ ether_addr_copy(cmd.dst_mac, mac);
+
+ rc = wmi_send(wil, WMI_NEW_STA_CMDID, vif->mid, &cmd, sizeof(cmd));
+ if (rc)
+ wil_err(wil, "Failed to send new sta (%d)\n", rc);
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_wmi(wil, "event_flush\n");
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+}
+
+static const char *suspend_status2name(u8 status)
+{
+ switch (status) {
+ case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+ return "LINK_NOT_IDLE";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT:
+ return "DISCONNECT";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_OTHER:
+ return "OTHER";
+ default:
+ return "Untracked status";
+ }
+}
+
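+/* Request FW to suspend traffic. Two-step flow: wmi_call() waits for
+ * WMI_TRAFFIC_SUSPEND_EVENTID, then we wait for suspend_resp_comp
+ * (completed outside this function) before reporting the result.
+ */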
+int wmi_suspend(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_traffic_suspend_cmd cmd = {
+ .wakeup_trigger = wil->wakeup_trigger,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_suspend_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE},
+ };
+
+ u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
+
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
+ suspend_to);
+ if (rc) {
+ wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
+ if (rc == -ETIME)
+ /* wmi_call TO */
+ wil->suspend_stats.rejected_by_device++;
+ else
+ wil->suspend_stats.rejected_by_host++;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
+
+ rc = wait_event_interruptible_timeout(wil->wq,
+ wil->suspend_resp_comp,
+ msecs_to_jiffies(suspend_to));
+ if (rc == 0) {
+ wil_err(wil, "TO waiting for suspend_response_completed\n");
+ if (wil->suspend_resp_rcvd)
+ /* Device responded but we TO due to another reason */
+ wil->suspend_stats.rejected_by_host++;
+ else
+ wil->suspend_stats.rejected_by_device++;
+ rc = -EBUSY;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
+ if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+ wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+ suspend_status2name(reply.evt.status));
+ wil->suspend_stats.rejected_by_device++;
+ }
+ rc = reply.evt.status;
+
+out:
+ wil->suspend_resp_rcvd = false;
+ wil->suspend_resp_comp = false;
+
+ return rc;
+}
+
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+ string[0] = '\0';
+
+ if (!triggers) {
+ strlcat(string, " UNKNOWN", str_size);
+ return;
+ }
+
+ if (triggers & WMI_RESUME_TRIGGER_HOST)
+ strlcat(string, " HOST", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+ strlcat(string, " UCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+ strlcat(string, " BCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+ strlcat(string, " WMI_EVT", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_DISCONNECT)
+ strlcat(string, " DISCONNECT", str_size);
+}
+
+int wmi_resume(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ char string[100];
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_traffic_resume_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_TRAFFIC_RESUME_FAILED,
+ .resume_triggers =
+ cpu_to_le32(WMI_RESUME_TRIGGER_UNKNOWN)},
+ };
+
+ rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0,
+ WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
+ WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
+ if (rc)
+ return rc;
+ resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+ sizeof(string));
+ wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+ reply.evt.status ? "failed" : "passed", string,
+ le32_to_cpu(reply.evt.resume_triggers));
+
+ return reply.evt.status;
+}
+
+int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
+ const u8 *mac, enum nl80211_iftype iftype)
+{
+ int rc;
+ struct wmi_port_allocate_cmd cmd = {
+ .mid = mid,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_port_allocated_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n",
+ mid, iftype, mac);
+
+ ether_addr_copy(cmd.mac, mac);
+ switch (iftype) {
+ case NL80211_IFTYPE_STATION:
+ cmd.port_role = WMI_PORT_STA;
+ break;
+ case NL80211_IFTYPE_AP:
+ cmd.port_role = WMI_PORT_AP;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ cmd.port_role = WMI_PORT_P2P_CLIENT;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ cmd.port_role = WMI_PORT_P2P_GO;
+ break;
+ /* what about monitor??? */
+ default:
+ wil_err(wil, "unsupported iftype: %d\n", iftype);
+ return -EINVAL;
+ }
+
+ rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid,
+ &cmd, sizeof(cmd),
+ WMI_PORT_ALLOCATED_EVENTID, &reply,
+ sizeof(reply), 300);
+ if (rc) {
+ wil_err(wil, "failed to allocate port, status %d\n", rc);
+ return rc;
+ }
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_PORT_ALLOCATE returned status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_port_delete(struct wil6210_priv *wil, u8 mid)
+{
+ int rc;
+ struct wmi_port_delete_cmd cmd = {
+ .mid = mid,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_port_deleted_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ wil_dbg_misc(wil, "port delete, mid %d\n", mid);
+
+ rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid,
+ &cmd, sizeof(cmd),
+ WMI_PORT_DELETED_EVENTID, &reply,
+ sizeof(reply), 2000);
+ if (rc) {
+ wil_err(wil, "failed to delete port, status %d\n", rc);
+ return rc;
+ }
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_PORT_DELETE returned status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool wmi_evt_call_handler(struct wil6210_vif *vif, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(vif, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
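+/* Handle a single mailbox event in the "wil6210_wmi" worker context:
+ * resolve the target VIF by MID, complete a pending wmi_call() if this
+ * is the awaited reply, otherwise dispatch to the handler table or
+ * report the event as unhandled.
+ */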
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+ struct wil6210_vif *vif;
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->command_id);
+ u8 mid = wmi->mid;
+
+ wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x,%d)\n",
+ eventid2name(id), id, wil->reply_id,
+ wil->reply_mid);
+
+ if (mid == MID_BROADCAST)
+ mid = 0;
+ if (mid >= GET_MAX_VIFS(wil)) {
+ wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
+ mid);
+ return;
+ }
+ vif = wil->vifs[mid];
+ if (!vif) {
+ wil_dbg_wmi(wil, "event for empty VIF(%d), skipped\n",
+ mid);
+ return;
+ }
+
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id &&
+ wil->reply_mid == mid) {
+ if (wil->reply_buf) {
+ /* event received while wmi_call is waiting
+ * with a buffer. Such event should be handled
+ * in wmi_recv_cmd function. Handling the event
+ * here means a previous wmi_call was timeout.
+ * Drop the event and do not handle it.
+ */
+				wil_err(wil,
+					"Old event (%d, %s) while wmi_call is waiting. Drop it and continue waiting\n",
+					id, eventid2name(id));
+ return;
+ }
+
+ wmi_evt_call_handler(vif, id, evt_data,
+ len - sizeof(*wmi));
+ wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
+ id);
+ complete(&wil->wmi_call);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(vif, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_info(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ wil_dbg_wmi(wil, "event_worker: Start\n");
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+ wil_dbg_wmi(wil, "event_worker: Finished\n");
+}
+
+bool wil_is_wmi_idle(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ bool rc = false;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ /* Check if there are pending WMI events in the events queue */
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ wil_dbg_pm(wil, "Pending WMI events in queue\n");
+ goto out;
+ }
+
+ /* Check if there is a pending WMI call */
+ if (wil->reply_id) {
+ wil_dbg_pm(wil, "Pending WMI call\n");
+ goto out;
+ }
+
+ /* Check if there are pending RX events in mbox */
+ r->head = wil_r(wil, RGF_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail != r->head)
+ wil_dbg_pm(wil, "Pending WMI mbox events\n");
+ else
+ rc = true;
+
+out:
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ return rc;
+}
+
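+/* Translate cfg80211 sched-scan (PNO) match sets into the FW SSID list,
+ * capped at WMI_MAX_PNO_SSID_NUM; SSIDs that are also in the active
+ * probe list are marked with add_ssid_to_probe.
+ */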
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_sets,
+ int n_match_sets)
+{
+ int i;
+
+ if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+ wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+ n_match_sets, WMI_MAX_PNO_SSID_NUM);
+ n_match_sets = WMI_MAX_PNO_SSID_NUM;
+ }
+ cmd->num_of_ssids = n_match_sets;
+
+ for (i = 0; i < n_match_sets; i++) {
+ struct wmi_sched_scan_ssid_match *wmi_match =
+ &cmd->ssid_for_match[i];
+ struct cfg80211_match_set *cfg_match = &match_sets[i];
+ int j;
+
+ wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+ memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+ min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+ wmi_match->rssi_threshold = S8_MIN;
+ if (cfg_match->rssi_thold >= S8_MIN &&
+ cfg_match->rssi_thold <= S8_MAX)
+ wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+ for (j = 0; j < n_ssids; j++)
+ if (wmi_match->ssid_len == ssids[j].ssid_len &&
+ memcmp(wmi_match->ssid, ssids[j].ssid,
+ wmi_match->ssid_len) == 0)
+ wmi_match->add_ssid_to_probe = true;
+ }
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ u32 n_channels,
+ struct ieee80211_channel **channels)
+{
+ int i;
+
+ if (n_channels > WMI_MAX_CHANNEL_NUM) {
+ wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+ n_channels, WMI_MAX_CHANNEL_NUM);
+ n_channels = WMI_MAX_CHANNEL_NUM;
+ }
+ cmd->num_of_channels = n_channels;
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *cfg_chan = channels[i];
+
+ cmd->channel_list[i] = cfg_chan->hw_value - 1;
+ }
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_sched_scan_plan *scan_plans,
+ int n_scan_plans)
+{
+ int i;
+
+ if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+ wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+ n_scan_plans, WMI_MAX_PLANS_NUM);
+ n_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ for (i = 0; i < n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+ cmd->scan_plans[i].interval_sec =
+ cpu_to_le16(cfg_plan->interval);
+ cmd->scan_plans[i].num_of_iterations =
+ cpu_to_le16(cfg_plan->iterations);
+ }
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_start_sched_scan_cmd cmd = {
+ .min_rssi_threshold = S8_MIN,
+ .initial_delay_sec = cpu_to_le16(request->delay),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_start_sched_scan_event evt;
+ } __packed reply = {
+ .evt = {.result = WMI_PNO_REJECT},
+ };
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (request->min_rssi_thold >= S8_MIN &&
+ request->min_rssi_thold <= S8_MAX)
+ cmd.min_rssi_threshold = request->min_rssi_thold;
+
+ wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+ request->match_sets, request->n_match_sets);
+ wmi_sched_scan_set_channels(wil, &cmd,
+ request->n_channels, request->channels);
+ wmi_sched_scan_set_plans(wil, &cmd,
+ request->scan_plans, request->n_scan_plans);
+
+ rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid,
+ &cmd, sizeof(cmd),
+ WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "start sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_stop_sched_scan_event evt;
+ } __packed reply = {
+ .evt = {.result = WMI_PNO_REJECT},
+ };
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0,
+ WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "stop sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
+{
+ size_t total;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_cmd *cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid);
+ wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+
+ if (len < sizeof(struct ieee80211_hdr_3addr))
+ return -EINVAL;
+
+ total = sizeof(*cmd) + len;
+ if (total < len) {
+ wil_err(wil, "mgmt_tx invalid len %zu\n", len);
+ return -EINVAL;
+ }
+
+ cmd = kmalloc(total, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
+ }
+
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
+ u8 channel, u16 duration_ms)
+{
+ size_t total;
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_ext_cmd *cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n",
+ vif->mid, channel, duration_ms);
+ wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+
+ if (len < sizeof(struct ieee80211_hdr_3addr)) {
+ wil_err(wil, "short frame. len %zu\n", len);
+ return -EINVAL;
+ }
+
+ total = sizeof(*cmd) + len;
+ if (total < len) {
+ wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(total, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+ cmd->channel = channel - 1;
+ cmd->duration_ms = cpu_to_le16(duration_ms);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
+ }
+
+ kfree(cmd);
+
+ return rc;
+}
+
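+/* The helpers below configure the enhanced DMA (EDMA) datapath in FW:
+ * status/descriptor ring add commands and the default RX offload
+ * settings. Each ring add records the returned ring tail pointer.
+ */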
+int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
+{
+ int rc;
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ struct wmi_tx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ },
+ .irq_index = WIL_TX_STATUS_IRQ_IDX
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_id = ring_id;
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct wmi_cfg_def_rx_offload_cmd cmd = {
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
+ .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .l2_802_3_offload_ctrl = 0,
+ .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_cfg_def_rx_offload_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_status_ring *sring = &wil->srings[ring_id];
+ int rc;
+ struct wmi_rx_status_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(sring->size),
+ .ring_id = ring_id,
+ },
+ .rx_msg_type = wil->use_compressed_rx_status ?
+ WMI_RX_MSG_TYPE_COMPRESSED :
+ WMI_RX_MSG_TYPE_EXTENDED,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_status_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
+ rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wil_ring *ring = &wil->ring_rx;
+ int rc;
+ struct wmi_rx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = WIL_RX_DESC_RING_ID,
+ },
+ .status_ring_id = status_ring_id,
+ .irq_index = WIL_RX_STATUS_IRQ_IDX,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
+ rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+
+ return 0;
+}
+
+int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
+ int rc;
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+ struct wmi_tx_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .status_ring_id = sring_id,
+ .cid = cid,
+ .tid = tid,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ }
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_tx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
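+
+/* Illustrative sketch (not compiled): the helper above marks the ring
+ * usable under txdata->lock, so a hypothetical datapath check reads
+ * txdata->enabled under the same lock.
+ */
+#if 0
+static bool wil_tx_ring_enabled_sketch(struct wil6210_priv *wil, int ring_id)
+{
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+ bool enabled;
+
+ spin_lock_bh(&txdata->lock);
+ enabled = txdata->enabled;
+ spin_unlock_bh(&txdata->lock);
+
+ return enabled;
+}
+#endif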
+
+int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wil_ring *ring = &wil->ring_tx[ring_id];
+ int rc;
+ struct wmi_bcast_desc_ring_add_cmd cmd = {
+ .ring_cfg = {
+ .ring_size = cpu_to_le16(ring->size),
+ .ring_id = ring_id,
+ },
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .status_ring_id = wil->tx_sring_idx,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_rx_desc_ring_cfg_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
+ rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Broadcast Tx config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&txdata->lock);
+ ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
+ txdata->mid = vif->mid;
+ txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
+ return 0;
+}
+
+int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wmi_link_stats_cmd cmd = {
+ .record_type_mask = cpu_to_le32(type),
+ .cid = cid,
+ .action = WMI_LINK_STATS_SNAPSHOT,
+ .interval_msec = cpu_to_le32(interval),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_link_stats_config_done_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ int rc;
+
+ rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply,
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Link statistics config failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
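+
+/* Usage sketch (illustrative, not compiled): request a one-shot basic
+ * statistics snapshot; WMI_LINK_STATS_TYPE_BASIC is defined in wmi.h,
+ * cid 0xff addresses all connected stations and the interval of 0 is a
+ * placeholder passed through as-is. The helper name is hypothetical.
+ */
+#if 0
+static int wil_link_stats_snapshot_sketch(struct wil6210_vif *vif)
+{
+ return wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC, 0xff, 0);
+}
+#endif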
+
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct {
+ struct wmi_set_link_monitor_cmd cmd;
+ s8 rssi_thold;
+ } __packed cmd = {
+ .cmd = {
+ .rssi_hyst = rssi_hyst,
+ .rssi_thresholds_list_size = 1,
+ },
+ .rssi_thold = rssi_thold,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_set_link_monitor_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
+ return -EINVAL;
+
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
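+
+/* Usage sketch (illustrative, not compiled): the threshold must fit in s8
+ * and the hysteresis in u8, as validated above; -60 dBm and 2 dB below are
+ * placeholder values and the helper name is hypothetical.
+ */
+#if 0
+static int wil_cqm_rssi_cfg_sketch(struct wil6210_priv *wil)
+{
+ return wmi_set_cqm_rssi_config(wil, -60, 2);
+}
+#endif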
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000..9affa4525
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,4221 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity
+ */
+
+/*
+ * This file contains the definitions of the Wireless Module Interface (WMI)
+ * protocol for the Qualcomm 60 GHz wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the wireless module (WM).
+ * Events are messages from the WM to the host.
+ *
+ * This is an automatically generated file.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+#define WMI_DEFAULT_ASSOC_STA (1)
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+#define WMI_MAX_LOSS_DMG_BEACONS (20)
+#define MAX_NUM_OF_SECTORS (128)
+#define WMI_INVALID_TEMPERATURE (0xFFFFFFFF)
+#define WMI_SCHED_MAX_ALLOCS_PER_CMD (4)
+#define WMI_RF_DTYPE_LENGTH (3)
+#define WMI_RF_ETYPE_LENGTH (3)
+#define WMI_RF_RX2TX_LENGTH (3)
+#define WMI_RF_ETYPE_VAL_PER_RANGE (5)
+/* DTYPE configuration array size
+ * must always be kept equal to (WMI_RF_DTYPE_LENGTH+1)
+ */
+#define WMI_RF_DTYPE_CONF_LENGTH (4)
+/* ETYPE configuration array size
+ * must always be kept equal to
+ * (WMI_RF_ETYPE_LENGTH+WMI_RF_ETYPE_VAL_PER_RANGE)
+ */
+#define WMI_RF_ETYPE_CONF_LENGTH (8)
+/* RX2TX configuration array size
+ * must always be kept equal to (WMI_RF_RX2TX_LENGTH+1)
+ */
+#define WMI_RF_RX2TX_CONF_LENGTH (4)
+/* Qos configuration */
+#define WMI_QOS_NUM_OF_PRIORITY (4)
+#define WMI_QOS_MIN_DEFAULT_WEIGHT (10)
+#define WMI_QOS_VRING_SLOT_MIN_MS (2)
+#define WMI_QOS_VRING_SLOT_MAX_MS (10)
+/* (WMI_QOS_MIN_DEFAULT_WEIGHT * WMI_QOS_VRING_SLOT_MAX_MS /
+ * WMI_QOS_VRING_SLOT_MIN_MS)
+ */
+#define WMI_QOS_MAX_WEIGHT 50
+#define WMI_QOS_SET_VIF_PRIORITY (0xFF)
+#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY)
+#define WMI_MAX_XIF_PORTS_NUM (8)
+
+/* Mailbox interface
+ * used for commands and events
+ */
+enum wmi_mid {
+ MID_DEFAULT = 0x00,
+ FIRST_DBG_MID_ID = 0x10,
+ LAST_DBG_MID_ID = 0xFE,
+ MID_BROADCAST = 0xFF,
+};
+
+/* FW capability IDs
+ * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
+ * the host
+ */
+enum wmi_fw_capability {
+ WMI_FW_CAPABILITY_FTM = 0,
+ WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_RF_SECTORS = 2,
+ WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
+ WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL = 4,
+ WMI_FW_CAPABILITY_WMI_ONLY = 5,
+ WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
+ WMI_FW_CAPABILITY_D3_SUSPEND = 8,
+ WMI_FW_CAPABILITY_LONG_RANGE = 9,
+ WMI_FW_CAPABILITY_FIXED_SCHEDULING = 10,
+ WMI_FW_CAPABILITY_MULTI_DIRECTED_OMNIS = 11,
+ WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
+ WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
+ WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_PNO = 15,
+ WMI_FW_CAPABILITY_CHANNEL_BONDING = 17,
+ WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
+ WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
+ WMI_FW_CAPABILITY_MULTI_VIFS = 20,
+ WMI_FW_CAPABILITY_FT_ROAMING = 21,
+ WMI_FW_CAPABILITY_BACK_WIN_SIZE_64 = 22,
+ WMI_FW_CAPABILITY_AMSDU = 23,
+ WMI_FW_CAPABILITY_RAW_MODE = 24,
+ WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
+ WMI_FW_CAPABILITY_CHANNEL_4 = 26,
+ WMI_FW_CAPABILITY_IPA = 27,
+ WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF = 30,
+ WMI_FW_CAPABILITY_SPLIT_REKEY = 31,
+ WMI_FW_CAPABILITY_MAX,
+};
+
+/* WMI_CMD_HDR */
+struct wmi_cmd_hdr {
+ u8 mid;
+ u8 reserved;
+ __le16 command_id;
+ __le32 fw_timestamp;
+} __packed;
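+
+/* Illustrative sketch (not compiled): every event arrives prefixed by this
+ * header, so reply buffers handed to wmi_call() in wmi.c pair the header
+ * with the expected event structure and pre-set the status to
+ * WMI_FW_STATUS_FAILURE; this fragment mirrors the call sites in wmi.c.
+ */
+#if 0
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_set_link_monitor_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+#endif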
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x01,
+ WMI_DISCONNECT_CMDID = 0x03,
+ WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCHED_SCAN_CMDID = 0x05,
+ WMI_STOP_SCHED_SCAN_CMDID = 0x06,
+ WMI_START_SCAN_CMDID = 0x07,
+ WMI_SET_BSS_FILTER_CMDID = 0x09,
+ WMI_SET_PROBED_SSID_CMDID = 0x0A,
+ /* deprecated */
+ WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_FT_AUTH_CMDID = 0x0C,
+ WMI_FT_REASSOC_CMDID = 0x0D,
+ WMI_UPDATE_FT_IES_CMDID = 0x0E,
+ WMI_BCON_CTRL_CMDID = 0x0F,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x16,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
+ WMI_PCP_CONF_CMDID = 0x18,
+ WMI_SET_APPIE_CMDID = 0x3F,
+ WMI_SET_WSC_STATUS_CMDID = 0x41,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x42,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_RADAR_GENERAL_CONFIG_CMDID = 0x100,
+ WMI_RADAR_CONFIG_SELECT_CMDID = 0x101,
+ WMI_RADAR_PARAMS_CONFIG_CMDID = 0x102,
+ WMI_RADAR_SET_MODE_CMDID = 0x103,
+ WMI_RADAR_CONTROL_CMDID = 0x104,
+ WMI_RADAR_PCI_CONTROL_CMDID = 0x105,
+ WMI_MEM_READ_CMDID = 0x800,
+ WMI_MEM_WR_CMDID = 0x801,
+ WMI_ECHO_CMDID = 0x803,
+ WMI_DEEP_ECHO_CMDID = 0x804,
+ WMI_CONFIG_MAC_CMDID = 0x805,
+ /* deprecated */
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x809,
+ /* deprecated */
+ WMI_FS_TUNE_CMDID = 0x80A,
+ /* deprecated */
+ WMI_CORR_MEASURE_CMDID = 0x80B,
+ WMI_READ_RSSI_CMDID = 0x80C,
+ WMI_TEMP_SENSE_CMDID = 0x80E,
+ WMI_DC_CALIB_CMDID = 0x80F,
+ /* deprecated */
+ WMI_SEND_TONE_CMDID = 0x810,
+ /* deprecated */
+ WMI_IQ_TX_CALIB_CMDID = 0x811,
+ /* deprecated */
+ WMI_IQ_RX_CALIB_CMDID = 0x812,
+ WMI_SET_WORK_MODE_CMDID = 0x815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
+ WMI_LO_POWER_CALIB_FROM_OTP_CMDID = 0x817,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
+ /* deprecated */
+ WMI_RF_RX_TEST_CMDID = 0x81E,
+ WMI_CFG_RX_CHAIN_CMDID = 0x820,
+ WMI_VRING_CFG_CMDID = 0x821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x822,
+ WMI_RING_BA_EN_CMDID = 0x823,
+ WMI_RING_BA_DIS_CMDID = 0x824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x825,
+ WMI_RCP_DELBA_CMDID = 0x826,
+ WMI_SET_SSID_CMDID = 0x827,
+ WMI_GET_SSID_CMDID = 0x828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
+ WMI_SW_TX_REQ_CMDID = 0x82B,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
+ WMI_SW_TX_REQ_EXT_CMDID = 0x82C,
+ WMI_MLME_PUSH_CMDID = 0x835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x837,
+ WMI_BF_SM_MGMT_CMDID = 0x838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x839,
+ WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
+ WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
+ WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
+ WMI_SET_LINK_MONITOR_CMDID = 0x845,
+ WMI_SET_SECTORS_CMDID = 0x849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x851,
+ WMI_RS_MGMT_CMDID = 0x852,
+ WMI_RF_MGMT_CMDID = 0x853,
+ WMI_RF_XPM_READ_CMDID = 0x856,
+ WMI_RF_XPM_WRITE_CMDID = 0x857,
+ WMI_LED_CFG_CMDID = 0x858,
+ WMI_SET_CONNECT_SNR_THR_CMDID = 0x85B,
+ WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C,
+ WMI_RF_PWR_ON_DELAY_CMDID = 0x85D,
+ WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E,
+ WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID = 0x85F,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x862,
+ WMI_NOTIFY_REQ_CMDID = 0x863,
+ WMI_GET_STATUS_CMDID = 0x864,
+ WMI_GET_RF_STATUS_CMDID = 0x866,
+ WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
+ WMI_VRING_SWITCH_TIMING_CONFIG_CMDID = 0x868,
+ WMI_UNIT_TEST_CMDID = 0x900,
+ WMI_FLASH_READ_CMDID = 0x902,
+ WMI_FLASH_WRITE_CMDID = 0x903,
+ /* Power management */
+ WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
+ WMI_TRAFFIC_RESUME_CMDID = 0x905,
+ /* P2P */
+ WMI_P2P_CFG_CMDID = 0x910,
+ WMI_PORT_ALLOCATE_CMDID = 0x911,
+ WMI_PORT_DELETE_CMDID = 0x912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x913,
+ WMI_START_LISTEN_CMDID = 0x914,
+ WMI_START_SEARCH_CMDID = 0x915,
+ WMI_DISCOVERY_START_CMDID = 0x916,
+ WMI_DISCOVERY_STOP_CMDID = 0x917,
+ WMI_PCP_START_CMDID = 0x918,
+ WMI_PCP_STOP_CMDID = 0x919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ /* Power Save Configuration Commands */
+ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ WMI_RS_ENABLE_CMDID = 0x91E,
+ WMI_RS_CFG_EX_CMDID = 0x91F,
+ WMI_GET_DETAILED_RS_RES_EX_CMDID = 0x920,
+ /* deprecated */
+ WMI_RS_CFG_CMDID = 0x921,
+ /* deprecated */
+ WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
+ WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
+ WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
+ WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
+ WMI_NEW_STA_CMDID = 0x935,
+ WMI_DEL_STA_CMDID = 0x936,
+ WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940,
+ WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941,
+ /* Read Power Save profile type */
+ WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942,
+ WMI_TSF_SYNC_CMDID = 0x973,
+ WMI_TOF_SESSION_START_CMDID = 0x991,
+ WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
+ WMI_TOF_SET_LCR_CMDID = 0x993,
+ WMI_TOF_SET_LCI_CMDID = 0x994,
+ WMI_TOF_CFG_RESPONDER_CMDID = 0x996,
+ WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997,
+ WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998,
+ WMI_TOF_CHANNEL_INFO_CMDID = 0x999,
+ WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0,
+ WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1,
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2,
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A3,
+ WMI_SET_RF_SECTOR_ON_CMDID = 0x9A4,
+ WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5,
+ WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
+ WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
+ /* deprecated */
+ WMI_BF_CONTROL_CMDID = 0x9AA,
+ WMI_BF_CONTROL_EX_CMDID = 0x9AB,
+ WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
+ WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
+ WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
+ WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
+ WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
+ WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
+ WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
+ WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
+ WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
+ WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04,
+ WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05,
+ WMI_GET_ASSOC_LIST_CMDID = 0xA06,
+ WMI_GET_CCA_INDICATIONS_CMDID = 0xA07,
+ WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08,
+ WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B,
+ WMI_LINK_STATS_CMDID = 0xA0C,
+ WMI_SET_GRANT_MCS_CMDID = 0xA0E,
+ WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
+ WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
+ WMI_RBUFCAP_CFG_CMDID = 0xA12,
+ WMI_TEMP_SENSE_ALL_CMDID = 0xA13,
+ WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
+ WMI_ABORT_SCAN_CMDID = 0xF007,
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
+ /* deprecated */
+ WMI_GET_PMK_CMDID = 0xF048,
+ WMI_SET_PASSPHRASE_CMDID = 0xF049,
+ /* deprecated */
+ WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
+ /* deprecated */
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
+ WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
+ WMI_FW_VER_CMDID = 0xF04E,
+ WMI_PMC_CMDID = 0xF04F,
+};
+
+/* WMI_CONNECT_CMDID */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ /* PCIE over 60g */
+ WMI_NETTYPE_WBE = 0x40,
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x01,
+ WMI_CONNECT_SEND_REASSOC = 0x02,
+ WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x08,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x10,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x20,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x40,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x80,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+enum wmi_channel {
+ WMI_CHANNEL_1 = 0x00,
+ WMI_CHANNEL_2 = 0x01,
+ WMI_CHANNEL_3 = 0x02,
+ WMI_CHANNEL_4 = 0x03,
+ WMI_CHANNEL_5 = 0x04,
+ WMI_CHANNEL_6 = 0x05,
+ WMI_CHANNEL_9 = 0x06,
+ WMI_CHANNEL_10 = 0x07,
+ WMI_CHANNEL_11 = 0x08,
+ WMI_CHANNEL_12 = 0x09,
+};
+
+/* WMI_CONNECT_CMDID */
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
+ u8 channel;
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+/* WMI_DISCONNECT_STA_CMDID */
+struct wmi_disconnect_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 disconnect_reason;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+
+/* WMI_SET_PASSPHRASE_CMDID */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/* WMI_ADD_CIPHER_KEY_CMDID */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0x00,
+ WMI_KEY_USE_RX_GROUP = 0x01,
+ WMI_KEY_USE_TX_GROUP = 0x02,
+ WMI_KEY_USE_STORE_PTK = 0x03,
+ WMI_KEY_USE_APPLY_PTK = 0x04,
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ /* enum wmi_key_usage */
+ u8 key_usage;
+ u8 key_len;
+ /* key replay sequence counter */
+ u8 key_rsc[8];
+ u8 key[WMI_MAX_KEY_LEN];
+ /* Additional Key Control information */
+ u8 key_op_ctrl;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/* WMI_DELETE_CIPHER_KEY_CMDID */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/* WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_ACTIVE_SCAN = 0x00,
+ WMI_SHORT_SCAN = 0x01,
+ WMI_PASSIVE_SCAN = 0x02,
+ WMI_DIRECT_SCAN = 0x03,
+ WMI_LONG_SCAN = 0x04,
+};
+
+/* WMI_START_SCAN_CMDID */
+struct wmi_start_scan_cmd {
+ u8 direct_scan_mac_addr[WMI_MAC_LEN];
+ /* run scan with discovery beacon. Relevant for ACTIVE scan only. */
+ u8 discovery_mode;
+ u8 reserved;
+ /* Max duration in the home channel(ms) */
+ __le32 dwell_time;
+ /* Time interval between scans (ms) */
+ __le32 force_scan_interval;
+ /* enum wmi_scan_type */
+ u8 scan_type;
+ /* how many channels follow */
+ u8 num_channels;
+ /* channel IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[];
+} __packed;
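+
+/* Illustrative sketch (not compiled): channel_list[] is a flexible array,
+ * so the command length is the fixed part plus one entry per channel and
+ * num_channels must match; n_ch and the allocation below are placeholders.
+ */
+#if 0
+ struct wmi_start_scan_cmd *cmd;
+ size_t len = struct_size(cmd, channel_list, n_ch);
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (cmd) {
+ cmd->num_channels = n_ch;
+ /* fill cmd->channel_list[i].channel, then send len bytes over WMI */
+ }
+#endif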
+
+#define WMI_MAX_PNO_SSID_NUM (16)
+#define WMI_MAX_CHANNEL_NUM (6)
+#define WMI_MAX_PLANS_NUM (2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ s8 rssi_threshold;
+ /* boolean */
+ u8 add_ssid_to_probe;
+ u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+ __le16 interval_sec;
+ __le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+ struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+ u8 num_of_ssids;
+ s8 min_rssi_threshold;
+ u8 channel_list[WMI_MAX_CHANNEL_NUM];
+ u8 num_of_channels;
+ u8 reserved;
+ __le16 initial_delay_sec;
+ struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
+/* WMI_FT_AUTH_CMDID */
+struct wmi_ft_auth_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_FT_REASSOC_CMDID */
+struct wmi_ft_reassoc_cmd {
+ u8 bssid[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/* WMI_UPDATE_FT_IES_CMDID */
+struct wmi_update_ft_ies_cmd {
+ /* Length of the FT IEs */
+ __le16 ie_len;
+ u8 reserved[2];
+ u8 ie_info[];
+} __packed;
+
+/* WMI_SET_PROBED_SSID_CMDID */
+#define MAX_PROBED_SSID_INDEX (3)
+
+enum wmi_ssid_flag {
+ /* disables entry */
+ WMI_SSID_FLAG_DISABLE = 0x00,
+ /* probes specified ssid */
+ WMI_SSID_FLAG_SPECIFIC = 0x01,
+ /* probes for any ssid */
+ WMI_SSID_FLAG_ANY = 0x02,
+};
+
+struct wmi_probed_ssid_cmd {
+ /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 entry_index;
+ /* enum wmi_ssid_flag */
+ u8 flag;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/* WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+#define WMI_MAX_IE_LEN (1024)
+
+/* Frame Types */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0x00,
+ WMI_FRAME_PROBE_REQ = 0x01,
+ WMI_FRAME_PROBE_RESP = 0x02,
+ WMI_FRAME_ASSOC_REQ = 0x03,
+ WMI_FRAME_ASSOC_RESP = 0x04,
+ WMI_NUM_MGMT_FRAME = 0x05,
+};
+
+struct wmi_set_appie_cmd {
+ /* enum wmi_mgmt_frame_type */
+ u8 mgmt_frm_type;
+ u8 reserved;
+ /* Length of the IE to be added to MGMT frame */
+ __le16 ie_len;
+ u8 ie_info[];
+} __packed;
+
+/* WMI_PXMT_RANGE_CFG_CMDID */
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[2];
+} __packed;
+
+/* WMI_RADAR_GENERAL_CONFIG_CMDID */
+struct wmi_radar_general_config_cmd {
+ /* Number of pulses (CIRs) in the FW FIFO that triggers pulse transfer
+ * from FW to host
+ */
+ __le32 fifo_watermark;
+ /* In units of us, in the range [100, 1000000] */
+ __le32 t_burst;
+ /* Valid in the range [1, 32768], 0xFFFF means infinite */
+ __le32 n_bursts;
+ /* In units of the 330 MHz clock, in the range [4, 2000]*330 */
+ __le32 t_pulse;
+ /* In the range of [1,4096] */
+ __le16 n_pulses;
+ /* Number of taps after cTap per CIR */
+ __le16 n_samples;
+ /* Offset from the main tap (0 = zero-distance). In the range of [0,
+ * 255]
+ */
+ u8 first_sample_offset;
+ /* Number of Pulses to average, 1, 2, 4, 8 */
+ u8 pulses_to_avg;
+ /* Number of adjacent taps to average, 1, 2, 4, 8 */
+ u8 samples_to_avg;
+ /* The index to config general params */
+ u8 general_index;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_CMDID */
+struct wmi_radar_config_select_cmd {
+ /* Select the general params index to use */
+ u8 general_index;
+ u8 reserved[3];
+ /* 0 means don't update burst_active_vector */
+ __le32 burst_active_vector;
+ /* 0 means don't update pulse_active_vector */
+ __le32 pulse_active_vector;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_CMDID */
+struct wmi_radar_params_config_cmd {
+ /* The burst index selected to config */
+ u8 burst_index;
+ /* 0-not active, 1-active */
+ u8 burst_en;
+ /* The pulse index selected to config */
+ u8 pulse_index;
+ /* 0-not active, 1-active */
+ u8 pulse_en;
+ /* TX RF to use on current pulse */
+ u8 tx_rfc_idx;
+ u8 tx_sector;
+ /* Offset from calibrated value.(expected to be 0)(value is row in
+ * Gain-LUT, not dB)
+ */
+ s8 tx_rf_gain_comp;
+ /* expected to be 0 */
+ s8 tx_bb_gain_comp;
+ /* RX RF to use on current pulse */
+ u8 rx_rfc_idx;
+ u8 rx_sector;
+ /* Offset from calibrated value.(expected to be 0)(value is row in
+ * Gain-LUT, not dB)
+ */
+ s8 rx_rf_gain_comp;
+ /* Value in dB.(expected to be 0) */
+ s8 rx_bb_gain_comp;
+ /* Offset from calibrated value.(expected to be 0) */
+ s8 rx_timing_offset;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_CMDID */
+struct wmi_radar_set_mode_cmd {
+ /* 0-disable/1-enable */
+ u8 enable;
+ /* enum wmi_channel */
+ u8 channel;
+ /* In the range of [0,7], 0xff means use default */
+ u8 tx_rfc_idx;
+ /* In the range of [0,7], 0xff means use default */
+ u8 rx_rfc_idx;
+} __packed;
+
+/* WMI_RADAR_CONTROL_CMDID */
+struct wmi_radar_control_cmd {
+ /* 0-stop/1-start */
+ u8 start;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_CMDID */
+struct wmi_radar_pci_control_cmd {
+ /* pcie host buffer start address */
+ __le64 base_addr;
+ /* pcie host control block address */
+ __le64 control_block_addr;
+ /* pcie host buffer size */
+ __le32 buffer_size;
+ __le32 reserved;
+} __packed;
+
+/* WMI_RF_MGMT_CMDID */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0x00,
+ WMI_RF_MGMT_W_ENABLE = 0x01,
+ WMI_RF_MGMT_GET_STATUS = 0x02,
+};
+
+/* WMI_BF_CONTROL_CMDID */
+enum wmi_bf_triggers {
+ WMI_BF_TRIGGER_RS_MCS1_TH_FAILURE = 0x01,
+ WMI_BF_TRIGGER_RS_MCS1_NO_BACK_FAILURE = 0x02,
+ WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP = 0x04,
+ WMI_BF_TRIGGER_MAX_BACK_FAILURE = 0x08,
+ WMI_BF_TRIGGER_FW = 0x10,
+ WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_KEEP_ALIVE = 0x20,
+ WMI_BF_TRIGGER_AOA = 0x40,
+ WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_UPM = 0x80,
+};
+
+/* WMI_RF_MGMT_CMDID */
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/* WMI_CORR_MEASURE_CMDID */
+struct wmi_corr_measure_cmd {
+ __le32 freq_mhz;
+ __le32 length_samples;
+ __le32 iterations;
+} __packed;
+
+/* WMI_SET_SSID_CMDID */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/* WMI_SET_PCP_CHANNEL_CMDID */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_BCON_CTRL_CMDID */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 pcp_max_assoc_sta;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+ u8 hidden_ssid;
+ u8 is_go;
+ /* A-BFT length override if non-0 */
+ u8 abft_len;
+ u8 reserved;
+} __packed;
+
+/* WMI_PORT_ALLOCATE_CMDID */
+enum wmi_port_role {
+ WMI_PORT_STA = 0x00,
+ WMI_PORT_PCP = 0x01,
+ WMI_PORT_AP = 0x02,
+ WMI_PORT_P2P_DEV = 0x03,
+ WMI_PORT_P2P_CLIENT = 0x04,
+ WMI_PORT_P2P_GO = 0x05,
+};
+
+/* WMI_PORT_ALLOCATE_CMDID */
+struct wmi_port_allocate_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 port_role;
+ u8 mid;
+} __packed;
+
+/* WMI_PORT_DELETE_CMDID */
+struct wmi_port_delete_cmd {
+ u8 mid;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
+enum wmi_wakeup_trigger {
+ WMI_WAKEUP_TRIGGER_UCAST = 0x01,
+ WMI_WAKEUP_TRIGGER_BCAST = 0x02,
+};
+
+/* WMI_TRAFFIC_SUSPEND_CMDID */
+struct wmi_traffic_suspend_cmd {
+ /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
+ u8 wakeup_trigger;
+} __packed;
+
+/* WMI_P2P_CFG_CMDID */
+enum wmi_discovery_mode {
+ WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
+ WMI_DISCOVERY_MODE_OFFLOAD = 0x01,
+ WMI_DISCOVERY_MODE_PEER2PEER = 0x02,
+};
+
+struct wmi_p2p_cfg_cmd {
+ /* enum wmi_discovery_mode */
+ u8 discovery_mode;
+ u8 channel;
+ /* base for listen/search duration calculation */
+ __le16 bcon_interval;
+} __packed;
+
+/* WMI_POWER_MGMT_CFG_CMDID */
+enum wmi_power_source_type {
+ WMI_POWER_SOURCE_BATTERY = 0x00,
+ WMI_POWER_SOURCE_OTHER = 0x01,
+};
+
+struct wmi_power_mgmt_cfg_cmd {
+ /* enum wmi_power_source_type */
+ u8 power_source;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PCP_START_CMDID */
+enum wmi_ap_sme_offload_mode {
+ /* Full AP SME in FW */
+ WMI_AP_SME_OFFLOAD_FULL = 0x00,
+ /* Probe AP SME in FW */
+ WMI_AP_SME_OFFLOAD_PARTIAL = 0x01,
+ /* AP SME in host */
+ WMI_AP_SME_OFFLOAD_NONE = 0x02,
+};
+
+/* WMI_PCP_START_CMDID */
+struct wmi_pcp_start_cmd {
+ __le16 bcon_interval;
+ u8 pcp_max_assoc_sta;
+ u8 hidden_ssid;
+ u8 is_go;
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 raw_mode;
+ u8 reserved[3];
+ /* A-BFT length override if non-0 */
+ u8 abft_len;
+ /* enum wmi_ap_sme_offload_mode_e */
+ u8 ap_sme_offload_mode;
+ u8 network_type;
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
+ u8 channel;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/* WMI_SW_TX_REQ_CMDID */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[];
+} __packed;
+
+/* WMI_SW_TX_REQ_EXT_CMDID */
+struct wmi_sw_tx_req_ext_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ __le16 duration_ms;
+ /* Channel to use, 0xFF for currently active channel */
+ u8 channel;
+ u8 reserved[5];
+ u8 payload[];
+} __packed;
+
+/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */
+struct wmi_vring_switch_timing_config_cmd {
+ /* Set vring timing configuration:
+ *
+ * defined interval for vring switch
+ */
+ __le32 interval_usec;
+ /* vring inactivity threshold */
+ __le32 idle_th_usec;
+} __packed;
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+/* wmi_vring_cfg_schd */
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0x00,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
+ WMI_VRING_ENC_TYPE_NONE = 0x02,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0x00,
+ WMI_VRING_DS_STATION = 0x01,
+ WMI_VRING_DS_AP = 0x02,
+ WMI_VRING_DS_ADDR4 = 0x03,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0x00,
+ WMI_SCH_PRIO_HIGH = 0x01,
+};
+
+#define CIDXTID_EXTENDED_CID_TID (0xFF)
+#define CIDXTID_CID_POS (0)
+#define CIDXTID_CID_LEN (4)
+#define CIDXTID_CID_MSK (0xF)
+#define CIDXTID_TID_POS (4)
+#define CIDXTID_TID_LEN (4)
+#define CIDXTID_TID_MSK (0xF0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ /* 0-23 vrings */
+ u8 ringid;
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 encap_trans_type;
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
+ u8 nwifi_ds_trans_type;
+ u8 mac_ctrl;
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ /* Update the vring's priority for Qos purpose. Set to
+ * WMI_QOS_DEFAULT_PRIORITY to use MID's QoS priority
+ */
+ u8 qos_priority;
+ u8 reserved;
+} __packed;
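+
+/* Illustrative sketch (not compiled): for cid below 8 the legacy cidxtid
+ * byte packs cid into bits 0..3 and tid into bits 4..7 using the CIDXTID_*
+ * macros above; larger cid values set CIDXTID_EXTENDED_CID_TID and use the
+ * separate cid/tid members instead. cfg, cid and tid are placeholders.
+ */
+#if 0
+ if (cid < 8) {
+ cfg->cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ } else {
+ cfg->cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cfg->cid = cid;
+ cfg->tid = tid;
+ }
+#endif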
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0x00,
+ WMI_VRING_CMD_MODIFY = 0x01,
+ WMI_VRING_CMD_DELETE = 0x02,
+};
+
+/* WMI_VRING_CFG_CMDID */
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+struct wmi_bcast_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ /* 0-23 vrings */
+ u8 ringid;
+ u8 encap_trans_type;
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
+ u8 nwifi_ds_trans_type;
+} __packed;
+
+/* WMI_BCAST_VRING_CFG_CMDID */
+struct wmi_bcast_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_bcast_vring_cfg vring_cfg;
+} __packed;
+
+struct wmi_edma_ring_cfg {
+ __le64 ring_mem_base;
+ /* size in number of items */
+ __le16 ring_size;
+ u8 ring_id;
+ u8 reserved;
+} __packed;
+
+enum wmi_rx_msg_type {
+ WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
+ WMI_RX_MSG_TYPE_EXTENDED = 0x01,
+};
+
+enum wmi_ring_add_irq_mode {
+ /* Backwards compatibility
+ * for DESC ring - interrupt disabled
+ * for STATUS ring - interrupt enabled
+ */
+ WMI_RING_ADD_IRQ_MODE_BWC = 0x00,
+ WMI_RING_ADD_IRQ_MODE_DISABLE = 0x01,
+ WMI_RING_ADD_IRQ_MODE_ENABLE = 0x02,
+};
+
+struct wmi_tx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved[2];
+} __packed;
+
+struct wmi_rx_status_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* wmi_rx_msg_type */
+ u8 rx_msg_type;
+ u8 reserved[2];
+} __packed;
+
+struct wmi_cfg_def_rx_offload_cmd {
+ __le16 max_msdu_size;
+ __le16 max_rx_pl_per_desc;
+ u8 decap_trans_type;
+ u8 l2_802_3_offload_ctrl;
+ u8 l2_nwifi_offload_ctrl;
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+ u8 l3_l4_ctrl;
+ u8 reserved[6];
+} __packed;
+
+struct wmi_tx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 cid;
+ u8 tid;
+ u8 encap_trans_type;
+ u8 mac_ctrl;
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ u8 irq_index;
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+struct wmi_rx_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ u8 irq_index;
+ /* 0-63 status rings */
+ u8 status_ring_id;
+ u8 reserved[2];
+ __le64 sw_tail_host_addr;
+} __packed;
+
+struct wmi_bcast_desc_ring_add_cmd {
+ struct wmi_edma_ring_cfg ring_cfg;
+ __le16 max_msdu_size;
+ /* Correlated status ring (0-63) */
+ u8 status_ring_id;
+ u8 encap_trans_type;
+ u8 reserved[4];
+} __packed;
+
+/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */
+struct wmi_lo_power_calib_from_otp_cmd {
+ /* index to read from OTP. zero based */
+ u8 index;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */
+struct wmi_lo_power_calib_from_otp_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RING_BA_EN_CMDID */
+struct wmi_ring_ba_en_cmd {
+ u8 ring_id;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+ u8 amsdu;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RING_BA_DIS_CMDID */
+struct wmi_ring_ba_dis_cmd {
+ u8 ring_id;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/* WMI_NOTIFY_REQ_CMDID */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 year;
+ u8 month;
+ u8 day;
+ __le32 interval_usec;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 miliseconds;
+} __packed;
+
+/* WMI_CFG_RX_CHAIN_CMDID */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0x00,
+ WMI_SNIFFER_ON = 0x01,
+};
+
+/* WMI_SILENT_RSSI_TABLE */
+enum wmi_silent_rssi_table {
+ RF_TEMPERATURE_CALIB_DEFAULT_DB = 0x00,
+ RF_TEMPERATURE_CALIB_HIGH_POWER_DB = 0x01,
+};
+
+/* WMI_SILENT_RSSI_STATUS */
+enum wmi_silent_rssi_status {
+ SILENT_RSSI_SUCCESS = 0x00,
+ SILENT_RSSI_FAILURE = 0x01,
+};
+
+/* WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID */
+struct wmi_set_active_silent_rssi_table_cmd {
+ /* enum wmi_silent_rssi_table */
+ __le32 table;
+} __packed;
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0x00,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 0x01,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0x00,
+ WMI_SNIFFER_DP = 0x01,
+ WMI_SNIFFER_BOTH_PHYS = 0x02,
+};
+
+/* wmi_sniffer_cfg */
+struct wmi_sniffer_cfg {
+ /* enum wmi_sniffer_cfg_mode */
+ __le32 mode;
+ /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_info_mode;
+ /* enum wmi_sniffer_cfg_phy_support */
+ __le32 phy_support;
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0x00,
+ WMI_RX_CHAIN_DEL = 0x01,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0x00,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 0x01,
+ WMI_DECAP_TYPE_NONE = 0x02,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02,
+};
+
+enum wmi_cfg_rx_chain_cmd_reorder_type {
+ WMI_RX_HW_REORDER = 0x00,
+ WMI_RX_SW_REORDER = 0x01,
+};
+
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+
+/* WMI_CFG_RX_CHAIN_CMDID */
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+ u8 l2_802_3_offload_ctrl;
+ u8 l2_nwifi_offload_ctrl;
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+ u8 l3_l4_ctrl;
+ u8 ring_ctrl;
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reorder_type;
+ u8 reserved;
+ struct wmi_sniffer_cfg sniffer_cfg;
+ __le16 max_rx_pl_per_desc;
+} __packed;
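+
+/* Illustrative sketch (not compiled): l3_l4_ctrl and ring_ctrl are built
+ * from the *_MSK bit macros above; enabling both RX checksum offloads and
+ * overriding the interrupt-throttling value on a wmi_cfg_rx_chain_cmd
+ * could look like this (the ITR value is a placeholder).
+ */
+#if 0
+ cmd.l3_l4_ctrl = L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK |
+ L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK;
+ cmd.ring_ctrl = RING_CTRL_OVERRIDE_ITR_THRSH_MSK;
+ cmd.itr_value = cpu_to_le32(300);
+#endif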
+
+/* WMI_RCP_ADDBA_RESP_CMDID */
+struct wmi_rcp_addba_resp_cmd {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 dialog_token;
+ __le16 status_code;
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
+struct wmi_rcp_addba_resp_edma_cmd {
+ u8 cid;
+ u8 tid;
+ u8 dialog_token;
+ u8 reserved;
+ __le16 status_code;
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ u8 status_ring_id;
+ /* wmi_cfg_rx_chain_cmd_reorder_type */
+ u8 reorder_type;
+} __packed;
+
+/* WMI_RCP_DELBA_CMDID */
+struct wmi_rcp_delba_cmd {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 reserved;
+ __le16 reason;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved2[2];
+} __packed;
+
+/* WMI_RCP_ADDBA_REQ_CMDID */
+struct wmi_rcp_addba_req_cmd {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as it was received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_SET_MAC_ADDRESS_CMDID */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/* WMI_ECHO_CMDID
+ * Check FW is alive
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+/* WMI_DEEP_ECHO_CMDID
+ * Check FW and uCode is alive
+ * Returned event: WMI_DEEP_ECHO_RSP_EVENTID
+ */
+struct wmi_deep_echo_cmd {
+ __le32 value;
+} __packed;
+
+/* WMI_RF_PWR_ON_DELAY_CMDID
+ * Set the FW time parameters used during an RF reset.
+ * An RF reset consists of bringing the RF power down for a period of time,
+ * then bringing the power back up.
+ * Returned event: WMI_RF_PWR_ON_DELAY_RSP_EVENTID
+ */
+struct wmi_rf_pwr_on_delay_cmd {
+ /* time in usec the FW waits after bringing the RF PWR down,
+ * set 0 for default
+ */
+ __le16 down_delay_usec;
+ /* time in usec the FW waits after bringing the RF PWR up,
+ * set 0 for default
+ */
+ __le16 up_delay_usec;
+} __packed;
+
+/* WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID
+ * This API controls the Tx and Rx gain over temperature.
+ * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers.
+ * It also controls the Tx gain index, by controlling the Rx to Tx gain index
+ * offset.
+ * Three temperature values divide the control into 4 temperature ranges.
+ * Each parameter uses its own temperature values.
+ * Returned event: WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID
+ */
+struct wmi_set_high_power_table_params_cmd {
+ /* Temperature range for Tx D-type parameters */
+ u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH];
+ u8 reserved0;
+ /* Tx D-type values to be used for each temperature range */
+ __le32 tx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH];
+ /* Temperature range for Tx E-type parameters */
+ u8 tx_etype_temp[WMI_RF_ETYPE_LENGTH];
+ u8 reserved1;
+ /* Tx E-type values to be used for each temperature range.
+ * The last 4 values of any range are the first 4 values of the next
+ * range and so on
+ */
+ __le32 tx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH];
+ /* Temperature range for Rx D-type parameters */
+ u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH];
+ u8 reserved2;
+ /* Rx D-type values to be used for each temperature range */
+ __le32 rx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH];
+ /* Temperature range for Rx E-type parameters */
+ u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH];
+ u8 reserved3;
+ /* Rx E-type values to be used for each temperature range.
+ * The last 4 values of any range are the first 4 values of the next
+ * range and so on
+ */
+ __le32 rx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH];
+ /* Temperature range for rx_2_tx_offs parameters */
+ u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH];
+ u8 reserved4;
+ /* Rx to Tx gain index offset */
+ s8 rx_2_tx_offs[WMI_RF_RX2TX_CONF_LENGTH];
+} __packed;
+
+/* WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID
+ * This API sets the RD parameter per MCS.
+ * Relevant only in Fixed Scheduling mode.
+ * Returned event: WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID
+ */
+struct wmi_fixed_scheduling_ul_config_cmd {
+ /* Use mcs -1 to set for every mcs */
+ s8 mcs;
+ /* Number of frames with rd bit set in a single virtual slot */
+ u8 rd_count_per_slot;
+ u8 reserved[2];
+} __packed;
+
+/* CMD: WMI_RF_XPM_READ_CMDID */
+struct wmi_rf_xpm_read_cmd {
+ u8 rf_id;
+ u8 reserved[3];
+ /* XPM bit start address in range [0,8191] bits - rounded by FW to a
+ * multiple of 8 bits
+ */
+ __le32 xpm_bit_address;
+ __le32 num_bytes;
+} __packed;
+
+/* CMD: WMI_RF_XPM_WRITE_CMDID */
+struct wmi_rf_xpm_write_cmd {
+ u8 rf_id;
+ u8 reserved0[3];
+ /* XPM bit start address in range [0,8191] bits - rounded by FW to a
+ * multiple of 8 bits
+ */
+ __le32 xpm_bit_address;
+ __le32 num_bytes;
+ /* boolean flag indicating whether FW should verify the write
+ * operation
+ */
+ u8 verify;
+ u8 reserved1[3];
+ /* actual size=num_bytes */
+ u8 data_bytes[];
+} __packed;
+
+/* Possible modes for temperature measurement */
+enum wmi_temperature_measure_mode {
+ TEMPERATURE_USE_OLD_VALUE = 0x01,
+ TEMPERATURE_MEASURE_NOW = 0x02,
+};
+
+/* WMI_TEMP_SENSE_CMDID */
+struct wmi_temp_sense_cmd {
+ __le32 measure_baseband_en;
+ __le32 measure_rf_en;
+ __le32 measure_mode;
+} __packed;
+
+enum wmi_pmc_op {
+ WMI_PMC_ALLOCATE = 0x00,
+ WMI_PMC_RELEASE = 0x01,
+};
+
+/* WMI_PMC_CMDID */
+struct wmi_pmc_cmd {
+ /* enum wmi_pmc_cmd_op_type */
+ u8 op;
+ u8 reserved;
+ __le16 ring_size;
+ __le64 mem_base;
+} __packed;
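+
+/* Illustrative sketch (not compiled): allocating the PMC host memory ring
+ * passes its size and DMA base address, while release sends only
+ * WMI_PMC_RELEASE; the descriptor count and pring_pa below are
+ * placeholders.
+ */
+#if 0
+ struct wmi_pmc_cmd cmd = {
+ .op = WMI_PMC_ALLOCATE,
+ .ring_size = cpu_to_le16(256),
+ .mem_base = cpu_to_le64(pring_pa),
+ };
+#endif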
+
+enum wmi_aoa_meas_type {
+ WMI_AOA_PHASE_MEAS = 0x00,
+ WMI_AOA_PHASE_AMP_MEAS = 0x01,
+};
+
+/* WMI_AOA_MEAS_CMDID */
+struct wmi_aoa_meas_cmd {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ __le32 meas_rf_mask;
+} __packed;
+
+/* WMI_SET_MGMT_RETRY_LIMIT_CMDID */
+struct wmi_set_mgmt_retry_limit_cmd {
+ /* MAC retransmit limit for mgmt frames */
+ u8 mgmt_retry_limit;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* Zones: HIGH, MAX, CRITICAL */
+#define WMI_NUM_OF_TT_ZONES (3)
+
+struct wmi_tt_zone_limits {
+ /* Above this temperature this zone is active */
+ u8 temperature_high;
+ /* Below this temperature the adjacent lower zone is active */
+ u8 temperature_low;
+ u8 reserved[2];
+} __packed;
+
+/* Struct used for both configuration and status commands of thermal
+ * throttling
+ */
+struct wmi_tt_data {
+ /* Enable/Disable TT algorithm for baseband */
+ u8 bb_enabled;
+ u8 reserved0[3];
+ /* Define zones for baseband */
+ struct wmi_tt_zone_limits bb_zones[WMI_NUM_OF_TT_ZONES];
+ /* Enable/Disable TT algorithm for radio */
+ u8 rf_enabled;
+ u8 reserved1[3];
+ /* Define zones for all radio chips */
+ struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES];
+} __packed;
+
+/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */
+struct wmi_set_thermal_throttling_cfg_cmd {
+ /* Command data */
+ struct wmi_tt_data tt_data;
+} __packed;
+
+/* WMI_NEW_STA_CMDID */
+struct wmi_new_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 aid;
+} __packed;
+
+/* WMI_DEL_STA_CMDID */
+struct wmi_del_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 disconnect_reason;
+} __packed;
+
+enum wmi_tof_burst_duration {
+ WMI_TOF_BURST_DURATION_250_USEC = 2,
+ WMI_TOF_BURST_DURATION_500_USEC = 3,
+ WMI_TOF_BURST_DURATION_1_MSEC = 4,
+ WMI_TOF_BURST_DURATION_2_MSEC = 5,
+ WMI_TOF_BURST_DURATION_4_MSEC = 6,
+ WMI_TOF_BURST_DURATION_8_MSEC = 7,
+ WMI_TOF_BURST_DURATION_16_MSEC = 8,
+ WMI_TOF_BURST_DURATION_32_MSEC = 9,
+ WMI_TOF_BURST_DURATION_64_MSEC = 10,
+ WMI_TOF_BURST_DURATION_128_MSEC = 11,
+ WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
+};
+
+enum wmi_tof_session_start_flags {
+ WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
+ WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
+};
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_ftm_dest_info {
+ u8 channel;
+ /* wmi_tof_session_start_flags_e */
+ u8 flags;
+ u8 initial_token;
+ u8 num_of_ftm_per_burst;
+ u8 num_of_bursts_exp;
+ /* wmi_tof_burst_duration_e */
+ u8 burst_duration;
+ /* Burst period indicates the interval between two consecutive burst
+ * instances, in units of 100 ms
+ */
+ __le16 burst_period;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved;
+ u8 num_burst_per_aoa_meas;
+} __packed;
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_tof_session_start_cmd {
+ __le32 session_id;
+ u8 reserved1;
+ u8 aoa_type;
+ __le16 num_of_dest;
+ u8 reserved[4];
+ struct wmi_ftm_dest_info ftm_dest_info[];
+} __packed;
+
+/* WMI_TOF_CFG_RESPONDER_CMDID */
+struct wmi_tof_cfg_responder_cmd {
+ u8 enable;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_tof_channel_info_report_type {
+ WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
+ WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
+ WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
+ WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
+ WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
+};
+
+/* WMI_TOF_CHANNEL_INFO_CMDID */
+struct wmi_tof_channel_info_cmd {
+ /* wmi_tof_channel_info_report_type_e */
+ __le32 channel_info_report_request;
+} __packed;
+
+/* WMI_TOF_SET_TX_RX_OFFSET_CMDID */
+struct wmi_tof_set_tx_rx_offset_cmd {
+ /* TX delay offset */
+ __le32 tx_offset;
+ /* RX delay offset */
+ __le32 rx_offset;
+ /* Mask to define which RFs to configure. 0 means all RFs */
+ __le32 rf_mask;
+ /* Offset to strongest tap of CIR */
+ __le32 precursor;
+} __packed;
+
+/* WMI_TOF_GET_TX_RX_OFFSET_CMDID */
+struct wmi_tof_get_tx_rx_offset_cmd {
+ /* rf index to read offsets from */
+ u8 rf_index;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */
+struct wmi_map_mcs_to_schd_params {
+ u8 mcs;
+ /* time in usec from start slot to start tx flow - default 15 */
+ u8 time_in_usec_before_initiate_tx;
+ /* RD enable - if yes consider RD according to STA mcs */
+ u8 rd_enabled;
+ u8 reserved;
+ /* time in usec from start slot to stop vring */
+ __le16 time_in_usec_to_stop_vring;
+ /* timeout to force flush from start of slot */
+ __le16 flush_to_in_usec;
+ /* per mcs the mac buffer limit size in bytes */
+ __le32 mac_buff_size_in_bytes;
+} __packed;
+
+/* WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID */
+struct wmi_fixed_scheduling_config_complete_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* This value exists for backwards compatibility only.
+ * Do not use it in new commands.
+ * Use dynamic arrays where possible.
+ */
+#define WMI_NUM_MCS (13)
+
+/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */
+struct wmi_fixed_scheduling_config_cmd {
+ /* defaults in the SAS table */
+ struct wmi_map_mcs_to_schd_params mcs_to_schd_params_map[WMI_NUM_MCS];
+ /* default 150 uSec */
+ __le16 max_sta_rd_ppdu_duration_in_usec;
+ /* default 300 uSec */
+ __le16 max_sta_grant_ppdu_duration_in_usec;
+ /* default 1000 uSec */
+ __le16 assoc_slot_duration_in_usec;
+ /* default 360 uSec */
+ __le16 virtual_slot_duration_in_usec;
+ /* a slot starts with a grant frame to the station once every this many
+ * AP slots - default 2
+ */
+ u8 number_of_ap_slots_for_initiate_grant;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_ENABLE_FIXED_SCHEDULING_CMDID */
+struct wmi_enable_fixed_scheduling_cmd {
+ __le32 reserved;
+} __packed;
+
+/* WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID */
+struct wmi_enable_fixed_scheduling_complete_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID */
+struct wmi_set_multi_directed_omnis_config_cmd {
+ /* number of directed omnis at destination AP */
+ u8 dest_ap_num_directed_omnis;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID */
+struct wmi_set_multi_directed_omnis_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_GENERAL_CONFIG_EVENTID */
+struct wmi_radar_general_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONFIG_SELECT_EVENTID */
+struct wmi_radar_config_select_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ /* In unit of bytes */
+ __le32 fifo_size;
+ /* In unit of bytes */
+ __le32 pulse_size;
+} __packed;
+
+/* WMI_RADAR_PARAMS_CONFIG_EVENTID */
+struct wmi_radar_params_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_SET_MODE_EVENTID */
+struct wmi_radar_set_mode_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_CONTROL_EVENTID */
+struct wmi_radar_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CONTROL_EVENTID */
+struct wmi_radar_pci_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_LONG_RANGE_CONFIG_CMDID */
+struct wmi_set_long_range_config_cmd {
+ __le32 reserved;
+} __packed;
+
+/* WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID */
+struct wmi_set_long_range_config_complete_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* payload max size is 1024 bytes: max event buffer size (1044) - WMI headers
+ * (16) - prev struct field size (4)
+ */
+#define WMI_MAX_IOCTL_PAYLOAD_SIZE (1024)
+#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (1024)
+#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (1024)
+
+enum wmi_internal_fw_ioctl_code {
+ WMI_INTERNAL_FW_CODE_NONE = 0x0,
+ WMI_INTERNAL_FW_CODE_QCOM = 0x1,
+};
+
+/* WMI_INTERNAL_FW_IOCTL_CMDID */
+struct wmi_internal_fw_ioctl_cmd {
+ /* enum wmi_internal_fw_ioctl_code */
+ __le16 code;
+ __le16 length;
+ /* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE
+ * Must be the last member of the struct
+ */
+ __le32 payload[];
+} __packed;
+
+/* WMI_INTERNAL_FW_IOCTL_EVENTID */
+struct wmi_internal_fw_ioctl_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved;
+ __le16 length;
+ /* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE
+ * Must be the last member of the struct
+ */
+ __le32 payload[];
+} __packed;
+
+/* WMI_INTERNAL_FW_EVENT_EVENTID */
+struct wmi_internal_fw_event_event {
+ __le16 id;
+ __le16 length;
+ /* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE
+ * Must be the last member of the struct
+ */
+ __le32 payload[];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_WEIGHT_CMDID */
+struct wmi_set_vring_priority_weight_cmd {
+ /* Array of weights. Valid values are
+ * WMI_QOS_MIN_DEFAULT_WEIGHT...WMI_QOS_MAX_WEIGHT. Weight #0 is
+ * hard-coded WMI_QOS_MIN_WEIGHT. This array provides the weights
+ * #1..#3
+ */
+ u8 weight[3];
+ u8 reserved;
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_vring_priority {
+ u8 vring_idx;
+ /* Weight index. Valid value is 0-3 */
+ u8 priority;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_CMDID */
+struct wmi_set_vring_priority_cmd {
+ /* number of entries in vring_priority. Set to
+ * WMI_QOS_SET_VIF_PRIORITY to update the VIF's priority, and there
+ * will be only one entry in vring_priority
+ */
+ u8 num_of_vrings;
+ u8 reserved[3];
+ struct wmi_vring_priority vring_priority[];
+} __packed;
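+
+/* Illustrative sketch (not compiled): updating only the VIF priority sets
+ * num_of_vrings to WMI_QOS_SET_VIF_PRIORITY with a single entry, as
+ * described above; the weight index below is a placeholder.
+ */
+#if 0
+ struct wmi_set_vring_priority_cmd *cmd;
+ size_t len = struct_size(cmd, vring_priority, 1);
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (cmd) {
+ cmd->num_of_vrings = WMI_QOS_SET_VIF_PRIORITY;
+ cmd->vring_priority[0].priority = 1;
+ }
+#endif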
+
+/* WMI_BF_CONTROL_CMDID - deprecated */
+struct wmi_bf_control_cmd {
+ /* wmi_bf_triggers */
+ __le32 triggers;
+ u8 cid;
+ /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */
+ u8 txss_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 brp_mode;
+ /* Max cts threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_thr;
+ /* Max cts threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_dense_thr;
+ /* Max b-ack threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_thr;
+ /* Max b-ack threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_dense_thr;
+ u8 reserved0;
+ /* Wrong sectors threshold */
+ __le32 wrong_sector_bis_thr;
+ /* BOOL to enable/disable long term trigger */
+ u8 long_term_enable;
+ /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and
+ * long_term_trig_timeout_per_mcs arrays, 0 = Ignore
+ */
+ u8 long_term_update_thr;
+ /* Long term throughput threshold [Mbps] */
+ u8 long_term_mbps_th_tbl[WMI_NUM_MCS];
+ u8 reserved1;
+ /* Long term timeout threshold table [msec] */
+ __le16 long_term_trig_timeout_per_mcs[WMI_NUM_MCS];
+ u8 reserved2[2];
+} __packed;
+
+/* BF configuration for each MCS */
+struct wmi_bf_control_ex_mcs {
+ /* Long term throughput threshold [Mbps] */
+ u8 long_term_mbps_th_tbl;
+ u8 reserved;
+ /* Long term timeout threshold table [msec] */
+ __le16 long_term_trig_timeout_per_mcs;
+} __packed;
+
+/* WMI_BF_CONTROL_EX_CMDID */
+struct wmi_bf_control_ex_cmd {
+ /* wmi_bf_triggers */
+ __le32 triggers;
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */
+ u8 txss_mode;
+ /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */
+ u8 brp_mode;
+ /* Max cts threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_thr;
+ /* Max cts threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP)
+ */
+ u8 bf_trigger_max_cts_failure_dense_thr;
+ /* Max b-ack threshold (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_thr;
+ /* Max b-ack threshold in dense (correspond to
+ * WMI_BF_TRIGGER_MAX_BACK_FAILURE)
+ */
+ u8 bf_trigger_max_back_failure_dense_thr;
+ u8 reserved0;
+ /* Wrong sectors threshold */
+ __le32 wrong_sector_bis_thr;
+ /* BOOL to enable/disable long term trigger */
+ u8 long_term_enable;
+ /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and
+ * long_term_trig_timeout_per_mcs arrays, 0 = Ignore
+ */
+ u8 long_term_update_thr;
+ u8 each_mcs_cfg_size;
+ u8 reserved1;
+ /* Configuration for each MCS */
+ struct wmi_bf_control_ex_mcs each_mcs_cfg[];
+} __packed;
+
+/* WMI_LINK_STATS_CMD */
+enum wmi_link_stats_action {
+ WMI_LINK_STATS_SNAPSHOT = 0x00,
+ WMI_LINK_STATS_PERIODIC = 0x01,
+ WMI_LINK_STATS_STOP_PERIODIC = 0x02,
+};
+
+/* WMI_LINK_STATS_EVENT record identifiers */
+enum wmi_link_stats_record_type {
+ WMI_LINK_STATS_TYPE_BASIC = 0x01,
+ WMI_LINK_STATS_TYPE_GLOBAL = 0x02,
+};
+
+/* WMI_LINK_STATS_CMDID */
+struct wmi_link_stats_cmd {
+ /* bitmask of required record types
+ * (wmi_link_stats_record_type_e)
+ */
+ __le32 record_type_mask;
+ /* 0xff for all cids */
+ u8 cid;
+ /* wmi_link_stats_action_e */
+ u8 action;
+ u8 reserved[6];
+ /* range = 100 - 10000 */
+ __le32 interval_msec;
+} __packed;
+
+/* WMI_SET_GRANT_MCS_CMDID */
+struct wmi_set_grant_mcs_cmd {
+ u8 mcs;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_CMDID */
+struct wmi_set_ap_slot_size_cmd {
+ __le32 slot_size;
+} __packed;
+
+/* WMI_TEMP_SENSE_ALL_CMDID */
+struct wmi_temp_sense_all_cmd {
+ u8 measure_baseband_en;
+ u8 measure_rf_en;
+ u8 measure_mode;
+ u8 reserved;
+} __packed;
+
+/* WMI Events
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_START_SCHED_SCAN_EVENTID = 0x1005,
+ WMI_STOP_SCHED_SCAN_EVENTID = 0x1006,
+ WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
+ WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_LINK_MONITOR_EVENTID = 0x100E,
+ WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
+ WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
+ WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
+ WMI_RADAR_SET_MODE_EVENTID = 0x1103,
+ WMI_RADAR_CONTROL_EVENTID = 0x1104,
+ WMI_RADAR_PCI_CONTROL_EVENTID = 0x1105,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_DEEP_ECHO_RSP_EVENTID = 0x1804,
+ /* deprecated */
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ /* deprecated */
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
+ /* deprecated */
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ /* deprecated */
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_LO_POWER_CALIB_FROM_OTP_EVENTID = 0x1817,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ /* deprecated */
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ /* Event is shared between WMI_SW_TX_REQ_CMDID and
+ * WMI_SW_TX_REQ_EXT_CMDID
+ */
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_BF_TRIG_EVENTID = 0x183A,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
+ WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
+ WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_SET_LINK_MONITOR_EVENTID = 0x1845,
+ WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856,
+ WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
+ WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C,
+ WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D,
+ WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E,
+ WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID = 0x185F,
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_RING_EN_EVENTID = 0x1865,
+ WMI_GET_RF_STATUS_EVENTID = 0x1866,
+ WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
+ WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ /* Power management */
+ WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
+ WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
+ /* P2P */
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
+ /* Power Save Configuration Events */
+ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ WMI_RS_ENABLE_EVENTID = 0x191E,
+ WMI_RS_CFG_EX_EVENTID = 0x191F,
+ WMI_GET_DETAILED_RS_RES_EX_EVENTID = 0x1920,
+ /* deprecated */
+ WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ /* deprecated */
+ WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
+ WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
+ WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
+ WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
+ WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940,
+ WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941,
+ /* return the Power Save profile */
+ WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942,
+ WMI_TSF_SYNC_STATUS_EVENTID = 0x1973,
+ WMI_TOF_SESSION_END_EVENTID = 0x1991,
+ WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
+ WMI_TOF_SET_LCR_EVENTID = 0x1993,
+ WMI_TOF_SET_LCI_EVENTID = 0x1994,
+ WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
+ WMI_TOF_CFG_RESPONDER_EVENTID = 0x1996,
+ WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997,
+ WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998,
+ WMI_TOF_CHANNEL_INFO_EVENTID = 0x1999,
+ WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0,
+ WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1,
+ WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2,
+ WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A3,
+ WMI_SET_RF_SECTOR_ON_DONE_EVENTID = 0x19A4,
+ WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5,
+ WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
+ WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
+ /* deprecated */
+ WMI_BF_CONTROL_EVENTID = 0x19AA,
+ WMI_BF_CONTROL_EX_EVENTID = 0x19AB,
+ WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
+ WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
+ WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
+ WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
+ WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
+ WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
+ WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
+ WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
+ WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04,
+ WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05,
+ WMI_GET_ASSOC_LIST_RES_EVENTID = 0x1A06,
+ WMI_GET_CCA_INDICATIONS_EVENTID = 0x1A07,
+ WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08,
+ WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A,
+ WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B,
+ WMI_LINK_STATS_CONFIG_DONE_EVENTID = 0x1A0C,
+ WMI_LINK_STATS_EVENTID = 0x1A0D,
+ WMI_SET_GRANT_MCS_EVENTID = 0x1A0E,
+ WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
+ WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
+ WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
+ WMI_RBUFCAP_CFG_EVENTID = 0x1A12,
+ WMI_TEMP_SENSE_ALL_DONE_EVENTID = 0x1A13,
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
+ WMI_INTERNAL_FW_SET_CHANNEL = 0x9006,
+ WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF,
+};
+
+/* Events data structures */
+enum wmi_fw_status {
+ WMI_FW_STATUS_SUCCESS = 0x00,
+ WMI_FW_STATUS_FAILURE = 0x01,
+};
+
+/* WMI_RF_MGMT_STATUS_EVENTID */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0x00,
+ WMI_RF_DISABLED_HW = 0x01,
+ WMI_RF_DISABLED_SW = 0x02,
+ WMI_RF_DISABLED_HW_SW = 0x03,
+};
+
+/* WMI_RF_MGMT_STATUS_EVENTID */
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/* WMI_GET_STATUS_DONE_EVENTID */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/* WMI_FW_VER_EVENTID */
+struct wmi_fw_ver_event {
+ /* FW image version */
+ __le32 fw_major;
+ __le32 fw_minor;
+ __le32 fw_subminor;
+ __le32 fw_build;
+ /* FW image build time stamp */
+ __le32 hour;
+ __le32 minute;
+ __le32 second;
+ __le32 day;
+ __le32 month;
+ __le32 year;
+ /* Boot Loader image version */
+ __le32 bl_major;
+ __le32 bl_minor;
+ __le32 bl_subminor;
+ __le32 bl_build;
+ /* The number of entries in the FW capabilities array */
+ u8 fw_capabilities_len;
+ u8 reserved[3];
+ /* FW capabilities info
+ * Must be the last member of the struct
+ */
+ __le32 fw_capabilities[];
+} __packed;
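+
+/* Illustrative sketch, not part of the firmware ABI: fetch one word of the
+ * trailing fw_capabilities[] array, bounded by fw_capabilities_len. Assumes
+ * le32_to_cpu is available; validating the event length against the WMI
+ * payload actually received is left to the caller.
+ */
+static inline u32
+wmi_fw_ver_capability_word(const struct wmi_fw_ver_event *evt, u8 idx)
+{
+	return idx < evt->fw_capabilities_len ?
+	       le32_to_cpu(evt->fw_capabilities[idx]) : 0;
+}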
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_type {
+ RF_UNKNOWN = 0x00,
+ RF_MARLON = 0x01,
+ RF_SPARROW = 0x02,
+ RF_TALYNA1 = 0x03,
+ RF_TALYNA2 = 0x04,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum board_file_rf_type {
+ BF_RF_MARLON = 0x00,
+ BF_RF_SPARROW = 0x01,
+ BF_RF_TALYNA1 = 0x02,
+ BF_RF_TALYNA2 = 0x03,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_status {
+ RF_OK = 0x00,
+ RF_NO_COMM = 0x01,
+ RF_WRONG_BOARD_FILE = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+struct wmi_get_rf_status_event {
+ /* enum rf_type */
+ __le32 rf_type;
+ /* attached RFs bit vector */
+ __le32 attached_rf_vector;
+ /* enabled RFs bit vector */
+ __le32 enabled_rf_vector;
+ /* enum rf_status, refers to enabled RFs */
+ u8 rf_status[32];
+ /* enum board file RF type */
+ __le32 board_file_rf_type;
+ /* board file platform type */
+ __le32 board_file_platform_type;
+ /* board file version */
+ __le32 board_file_version;
+ /* enabled XIFs bit vector */
+ __le32 enabled_xif_vector;
+ __le32 reserved;
+} __packed;
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+enum baseband_type {
+ BASEBAND_UNKNOWN = 0x00,
+ BASEBAND_SPARROW_M_A0 = 0x03,
+ BASEBAND_SPARROW_M_A1 = 0x04,
+ BASEBAND_SPARROW_M_B0 = 0x05,
+ BASEBAND_SPARROW_M_C0 = 0x06,
+ BASEBAND_SPARROW_M_D0 = 0x07,
+ BASEBAND_TALYN_M_A0 = 0x08,
+ BASEBAND_TALYN_M_B0 = 0x09,
+};
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+struct wmi_get_baseband_type_event {
+ /* enum baseband_type */
+ __le32 baseband_type;
+} __packed;
+
+/* WMI_MAC_ADDR_RESP_EVENTID */
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/* WMI_EAPOL_RX_EVENTID */
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[];
+} __packed;
+
+/* WMI_READY_EVENTID */
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 0x01,
+ WMI_11G_CAPABILITY = 0x02,
+ WMI_11AG_CAPABILITY = 0x03,
+ WMI_11NA_CAPABILITY = 0x04,
+ WMI_11NG_CAPABILITY = 0x05,
+ WMI_11NAG_CAPABILITY = 0x06,
+ WMI_11AD_CAPABILITY = 0x07,
+ WMI_11N_CAPABILITY_OFFSET = 0x03,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ /* enum wmi_phy_capability */
+ u8 phy_capability;
+ u8 numof_additional_mids;
+ /* rfc read calibration result. 5..15 */
+ u8 rfc_read_calib_result;
+ /* Max associated STAs supported by FW in AP mode (default 0 means 8
+ * STA)
+ */
+ u8 max_assoc_sta;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_NOTIFY_REQ_DONE_EVENTID */
+struct wmi_notify_req_done_event {
+ /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ __le32 status;
+ __le64 tsf;
+ s8 rssi;
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ u8 reserved0[2];
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+ u8 sqi;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_CONNECT_EVENTID */
+struct wmi_connect_event {
+ /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is
+ * the primary channel number
+ */
+ u8 channel;
+ /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */
+ u8 edmg_channel;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 aid;
+ u8 reserved2[2];
+ /* not in use */
+ u8 assoc_info[];
+} __packed;
+
+/* disconnect_reason */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01,
+ /* bmiss */
+ WMI_DIS_REASON_LOST_LINK = 0x02,
+ WMI_DIS_REASON_DISCONNECT_CMD = 0x03,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 0x04,
+ WMI_DIS_REASON_AUTH_FAILED = 0x05,
+ WMI_DIS_REASON_ASSOC_FAILED = 0x06,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 0x08,
+ WMI_DIS_REASON_INVALID_PROFILE = 0x0A,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
+ WMI_DIS_REASON_IBSS_MERGE = 0x0E,
+ WMI_DIS_REASON_HIGH_TEMPERATURE = 0x0F,
+};
+
+/* WMI_DISCONNECT_EVENTID */
+struct wmi_disconnect_event {
+ /* reason code, see 802.11 spec. */
+ __le16 protocol_reason_status;
+ /* set if known */
+ u8 bssid[WMI_MAC_LEN];
+ /* see enum wmi_disconnect_reason */
+ u8 disconnect_reason;
+ /* last assoc req may be passed to host - not in use */
+ u8 assoc_resp_len;
+ /* last assoc req may be passed to host - not in use */
+ u8 assoc_info[];
+} __packed;
+
+/* WMI_SCAN_COMPLETE_EVENTID */
+enum scan_status {
+ WMI_SCAN_SUCCESS = 0x00,
+ WMI_SCAN_FAILED = 0x01,
+ WMI_SCAN_ABORTED = 0x02,
+ WMI_SCAN_REJECTED = 0x03,
+ WMI_SCAN_ABORT_REJECTED = 0x04,
+};
+
+struct wmi_scan_complete_event {
+ /* enum scan_status */
+ __le32 status;
+} __packed;
+
+/* WMI_FT_AUTH_STATUS_EVENTID */
+struct wmi_ft_auth_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 ie_len;
+ u8 ie_info[];
+} __packed;
+
+/* WMI_FT_REASSOC_STATUS_EVENTID */
+struct wmi_ft_reassoc_status_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* association id received from new AP */
+ u8 aid;
+ /* enum wmi_channel */
+ u8 channel;
+ /* enum wmi_channel */
+ u8 edmg_channel;
+ u8 mac_addr[WMI_MAC_LEN];
+ __le16 beacon_ie_len;
+ __le16 reassoc_req_ie_len;
+ __le16 reassoc_resp_ie_len;
+ u8 reserved[4];
+ u8 ie_info[];
+} __packed;
+
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 rssi;
+ u8 range;
+ u8 sqi;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 qid;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 mid;
+ u8 cid;
+ /* From Radio MNGR */
+ u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+ WMI_PNO_SUCCESS = 0x00,
+ WMI_PNO_REJECT = 0x01,
+ WMI_PNO_INVALID_PARAMETERS = 0x02,
+ WMI_PNO_NOT_ENABLED = 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+ /* wmi_pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+ /* wmi_pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[];
+} __packed;
+
+/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
+enum wmi_acs_info_bitmask {
+ WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
+ WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02,
+ WMI_ACS_INFO_BITMASK_TX_TIME = 0x04,
+ WMI_ACS_INFO_BITMASK_RX_TIME = 0x08,
+ WMI_ACS_INFO_BITMASK_NOISE = 0x10,
+};
+
+struct scan_acs_info {
+ u8 channel;
+ u8 beacon_found;
+ /* msec */
+ __le16 busy_time;
+ __le16 tx_time;
+ __le16 rx_time;
+ u8 noise;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_acs_passive_scan_complete_event {
+ __le32 dwell_time;
+ /* valid fields within channel info according to
+ * their appearance in struct order
+ */
+ __le16 filled;
+ u8 num_scanned_channels;
+ u8 reserved;
+ struct scan_acs_info scan_info_list[];
+} __packed;
+
+/* WMI_BA_STATUS_EVENTID */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0x00,
+ WMI_BA_NON_AGREED = 0x01,
+ /* BA_EN in middle of teardown flow */
+ WMI_BA_TD_WIP = 0x02,
+ /* BA_DIS or BA_EN in middle of BA SETUP flow */
+ WMI_BA_SETUP_WIP = 0x03,
+ /* BA_EN when the BA session is already active */
+ WMI_BA_SESSION_ACTIVE = 0x04,
+ /* BA_DIS when the BA session is not active */
+ WMI_BA_SESSION_NOT_ACTIVE = 0x05,
+};
+
+struct wmi_ba_status_event {
+ /* enum wmi_vring_ba_status */
+ __le16 status;
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+ u8 amsdu;
+} __packed;
+
+/* WMI_DELBA_EVENTID */
+struct wmi_delba_event {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 from_initiator;
+ __le16 reason;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_VRING_CFG_DONE_EVENTID */
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
+struct wmi_rcp_addba_resp_sent_event {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 reserved;
+ __le16 status;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved2[2];
+} __packed;
+
+/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_tx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
+struct wmi_rx_status_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
+struct wmi_cfg_def_rx_offload_done_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_tx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
+struct wmi_rx_desc_ring_cfg_done_event {
+ u8 ring_id;
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+ __le32 ring_tail_ptr;
+} __packed;
+
+/* WMI_RCP_ADDBA_REQ_EVENTID */
+struct wmi_rcp_addba_req_event {
+ /* Used for cid less than 8. For higher cid set
+ * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead
+ */
+ u8 cidxtid;
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset as received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as received */
+ __le16 ba_seq_ctrl;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 cid;
+ /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */
+ u8 tid;
+ u8 reserved[2];
+} __packed;
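+
+/* Illustrative sketch, not part of the firmware ABI: resolve cid/tid from an
+ * ADDBA request event. Assumes the legacy cidxtid packing keeps the cid in
+ * the low nibble and the tid in the high nibble; when the event carries
+ * CIDXTID_EXTENDED_CID_TID, the dedicated cid/tid members are used instead,
+ * as the field comments above describe.
+ */
+static inline void
+wmi_addba_req_get_cid_tid(const struct wmi_rcp_addba_req_event *evt,
+			  u8 *cid, u8 *tid)
+{
+	if (evt->cidxtid == CIDXTID_EXTENDED_CID_TID) {
+		*cid = evt->cid;
+		*tid = evt->tid;
+	} else {
+		*cid = evt->cidxtid & 0xf;
+		*tid = (evt->cidxtid >> 4) & 0xf;
+	}
+}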
+
+/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 0x01,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ /* V-Ring Tail pointer */
+ __le32 rx_ring_tail_ptr;
+ __le32 status;
+} __packed;
+
+/* WMI_WBE_LINK_DOWN_EVENTID */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0x00,
+ WMI_WBE_REASON_RX_DISASSOC = 0x01,
+ WMI_WBE_REASON_BAD_PHY_LINK = 0x02,
+};
+
+/* WMI_WBE_LINK_DOWN_EVENTID */
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/* WMI_DATA_PORT_OPEN_EVENTID */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RING_EN_EVENTID */
+struct wmi_ring_en_event {
+ u8 ring_index;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_PCP_CHANNEL_EVENTID */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_P2P_CFG_DONE_EVENTID */
+struct wmi_p2p_cfg_done_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PORT_ALLOCATED_EVENTID */
+struct wmi_port_allocated_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PORT_DELETED_EVENTID */
+struct wmi_port_deleted_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LISTEN_STARTED_EVENTID */
+struct wmi_listen_started_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SEARCH_STARTED_EVENTID */
+struct wmi_search_started_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PCP_STARTED_EVENTID */
+struct wmi_pcp_started_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PCP_FACTOR_EVENTID */
+struct wmi_pcp_factor_event {
+ __le32 pcp_factor;
+} __packed;
+
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0x00,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01,
+ WMI_TX_SW_STATUS_FAILED_TX = 0x02,
+};
+
+/* WMI_SW_TX_COMPLETE_EVENTID */
+struct wmi_sw_tx_complete_event {
+ /* enum wmi_sw_tx_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_CORR_MEASURE_EVENTID - deprecated */
+struct wmi_corr_measure_event {
+ /* signed */
+ __le32 i;
+ /* signed */
+ __le32 q;
+ /* signed */
+ __le32 image_i;
+ /* signed */
+ __le32 image_q;
+} __packed;
+
+/* WMI_READ_RSSI_EVENTID */
+struct wmi_read_rssi_event {
+ __le32 ina_rssi_adc_dbm;
+} __packed;
+
+/* WMI_GET_SSID_EVENTID */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
+struct wmi_rf_xpm_read_result_event {
+ /* enum wmi_fw_status_e - success=0 or fail=1 */
+ u8 status;
+ u8 reserved[3];
+ /* requested num_bytes of data */
+ u8 data_bytes[];
+} __packed;
+
+/* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */
+struct wmi_rf_xpm_write_result_event {
+ /* enum wmi_fw_status_e - success=0 or fail=1 */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TX_MGMT_PACKET_EVENTID */
+struct wmi_tx_mgmt_packet_event {
+ u8 payload[0];
+} __packed;
+
+/* WMI_RX_MGMT_PACKET_EVENTID */
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[];
+} __packed;
+
+/* WMI_ECHO_RSP_EVENTID */
+struct wmi_echo_rsp_event {
+ __le32 echoed_value;
+} __packed;
+
+/* WMI_DEEP_ECHO_RSP_EVENTID */
+struct wmi_deep_echo_rsp_event {
+ __le32 echoed_value;
+} __packed;
+
+/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */
+struct wmi_rf_pwr_on_delay_rsp_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID */
+struct wmi_set_high_power_table_params_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID */
+struct wmi_fixed_scheduling_ul_config_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TEMP_SENSE_DONE_EVENTID
+ *
+ * Measure MAC and radio temperatures
+ */
+struct wmi_temp_sense_done_event {
+ /* Temperature times 1000 (the actual temperature is obtained by
+ * dividing the value by 1000). When the temperature cannot be read
+ * from the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 baseband_t1000;
+ /* Temperature times 1000 (the actual temperature is obtained by
+ * dividing the value by 1000). When the temperature cannot be read
+ * from the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 rf_t1000;
+} __packed;
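+
+/* Illustrative sketch, not part of the firmware ABI: convert a *_t1000
+ * reading, already converted to host byte order and checked against
+ * WMI_INVALID_TEMPERATURE by the caller, into whole degrees Celsius by the
+ * division described in the field comments above.
+ */
+static inline int wmi_t1000_to_celsius(s32 t1000)
+{
+	return t1000 / 1000;
+}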
+
+#define WMI_SCAN_DWELL_TIME_MS (100)
+#define WMI_SURVEY_TIMEOUT_MS (10000)
+
+enum wmi_hidden_ssid {
+ WMI_HIDDEN_SSID_DISABLED = 0x00,
+ WMI_HIDDEN_SSID_SEND_EMPTY = 0x10,
+ WMI_HIDDEN_SSID_CLEAR = 0xFE,
+};
+
+/* WMI_LED_CFG_CMDID
+ *
+ * Configure LED On\Off\Blinking operation
+ *
+ * Returned events:
+ * - WMI_LED_CFG_DONE_EVENTID
+ */
+enum led_mode {
+ LED_DISABLE = 0x00,
+ LED_ENABLE = 0x01,
+};
+
+/* The names of the LEDs as
+ * described in the HW schematics.
+ */
+enum wmi_led_id {
+ WMI_LED_WLAN = 0x00,
+ WMI_LED_WPAN = 0x01,
+ WMI_LED_WWAN = 0x02,
+};
+
+/* Led polarity mode. */
+enum wmi_led_polarity {
+ LED_POLARITY_HIGH_ACTIVE = 0x00,
+ LED_POLARITY_LOW_ACTIVE = 0x01,
+};
+
+/* Combination of on and off
+ * creates the blinking period
+ */
+struct wmi_led_blink_mode {
+ __le32 blink_on;
+ __le32 blink_off;
+} __packed;
+
+/* WMI_LED_CFG_CMDID */
+struct wmi_led_cfg_cmd {
+ /* enum led_mode_e */
+ u8 led_mode;
+ /* enum wmi_led_id_e */
+ u8 id;
+ /* slow speed blinking combination */
+ struct wmi_led_blink_mode slow_blink_cfg;
+ /* medium speed blinking combination */
+ struct wmi_led_blink_mode medium_blink_cfg;
+ /* high speed blinking combination */
+ struct wmi_led_blink_mode fast_blink_cfg;
+ /* polarity of the led */
+ u8 led_polarity;
+ /* reserved */
+ u8 reserved;
+} __packed;
+
+/* \WMI_SET_CONNECT_SNR_THR_CMDID */
+struct wmi_set_connect_snr_thr_cmd {
+ u8 enable;
+ u8 reserved;
+ /* 1/4 dB units */
+ __le16 omni_snr_thr;
+ /* 1/4 dB units */
+ __le16 direct_snr_thr;
+} __packed;
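+
+/* Illustrative sketch, not part of the firmware ABI: the SNR thresholds
+ * above are carried in 1/4 dB units, so e.g. a 10 dB threshold is encoded
+ * as 40. Assumes cpu_to_le16 is available.
+ */
+static inline __le16 wmi_snr_db_to_quarter_db(int snr_db)
+{
+	return cpu_to_le16(snr_db * 4);
+}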
+
+/* WMI_LED_CFG_DONE_EVENTID */
+struct wmi_led_cfg_done_event {
+ /* led config status */
+ __le32 status;
+} __packed;
+
+/* Rate search parameters configuration per connection */
+struct wmi_rs_cfg {
+ /* The maximal allowed PER for each MCS;
+ * an MCS is considered failed if its PER during RS is higher
+ */
+ u8 per_threshold[WMI_NUM_MCS];
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt[WMI_NUM_MCS];
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ __le32 back_failure_mask;
+ __le32 mcs_en_vec;
+} __packed;
+
+enum wmi_edmg_tx_mode {
+ WMI_TX_MODE_DMG = 0x0,
+ WMI_TX_MODE_EDMG_CB1 = 0x1,
+ WMI_TX_MODE_EDMG_CB2 = 0x2,
+ WMI_TX_MODE_EDMG_CB1_LONG_LDPC = 0x3,
+ WMI_TX_MODE_EDMG_CB2_LONG_LDPC = 0x4,
+ WMI_TX_MODE_MAX,
+};
+
+/* Rate search parameters common configuration */
+struct wmi_rs_cfg_ex_common {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ u8 reserved[3];
+ __le32 back_failure_mask;
+} __packed;
+
+/* Rate search parameters configuration per MCS */
+struct wmi_rs_cfg_ex_mcs {
+ /* The maximal allowed PER for each MCS;
+ * an MCS is considered failed if its PER during RS is higher
+ */
+ u8 per_threshold;
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_CFG_EX_CMDID */
+struct wmi_rs_cfg_ex_cmd {
+ /* Configuration for all MCSs */
+ struct wmi_rs_cfg_ex_common common_cfg;
+ u8 each_mcs_cfg_size;
+ u8 reserved[3];
+ /* Configuration for each MCS */
+ struct wmi_rs_cfg_ex_mcs each_mcs_cfg[];
+} __packed;
+
+/* WMI_RS_CFG_EX_EVENTID */
+struct wmi_rs_cfg_ex_event {
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_RS_ENABLE_CMDID */
+struct wmi_rs_enable_cmd {
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ u8 reserved[2];
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_ENABLE_EVENTID */
+struct wmi_rs_enable_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* Slot types */
+enum wmi_sched_scheme_slot_type {
+ WMI_SCHED_SLOT_SP = 0x0,
+ WMI_SCHED_SLOT_CBAP = 0x1,
+ WMI_SCHED_SLOT_IDLE = 0x2,
+ WMI_SCHED_SLOT_ANNOUNCE_NO_ACK = 0x3,
+ WMI_SCHED_SLOT_DISCOVERY = 0x4,
+};
+
+enum wmi_sched_scheme_slot_flags {
+ WMI_SCHED_SCHEME_SLOT_PERIODIC = 0x1,
+};
+
+struct wmi_sched_scheme_slot {
+ /* in microsecond */
+ __le32 tbtt_offset;
+ /* wmi_sched_scheme_slot_flags */
+ u8 flags;
+ /* wmi_sched_scheme_slot_type */
+ u8 type;
+ /* in microsecond */
+ __le16 duration;
+ /* frame_exchange_sequence_duration */
+ __le16 tx_op;
+ /* time in microseconds between two consecutive slots
+ * relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set
+ */
+ __le16 period;
+ /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set
+ * number of times to repeat allocation
+ */
+ u8 num_of_blocks;
+ /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set
+ * every idle_period allocation will be idle
+ */
+ u8 idle_period;
+ u8 src_aid;
+ u8 dest_aid;
+ __le32 reserved;
+} __packed;
+
+enum wmi_sched_scheme_flags {
+ /* should not be set when clearing scheduling scheme */
+ WMI_SCHED_SCHEME_ENABLE = 0x01,
+ WMI_SCHED_PROTECTED_SP = 0x02,
+ /* should be set only on first WMI fragment of scheme */
+ WMI_SCHED_FIRST = 0x04,
+ /* should be set only on last WMI fragment of scheme */
+ WMI_SCHED_LAST = 0x08,
+ WMI_SCHED_IMMEDIATE_START = 0x10,
+};
+
+enum wmi_sched_scheme_advertisment {
+ /* ESE is not advertised at all, STA has to be configured with WMI
+ * also
+ */
+ WMI_ADVERTISE_ESE_DISABLED = 0x0,
+ WMI_ADVERTISE_ESE_IN_BEACON = 0x1,
+ WMI_ADVERTISE_ESE_IN_ANNOUNCE_FRAME = 0x2,
+};
+
+/* WMI_SCHEDULING_SCHEME_CMD */
+struct wmi_scheduling_scheme_cmd {
+ u8 serial_num;
+ /* wmi_sched_scheme_advertisment */
+ u8 ese_advertisment;
+ /* wmi_sched_scheme_flags */
+ __le16 flags;
+ u8 num_allocs;
+ u8 reserved[3];
+ __le64 start_tbtt;
+ /* allocations list */
+ struct wmi_sched_scheme_slot allocs[WMI_SCHED_MAX_ALLOCS_PER_CMD];
+} __packed;
+
+enum wmi_sched_scheme_failure_type {
+ WMI_SCHED_SCHEME_FAILURE_NO_ERROR = 0x00,
+ WMI_SCHED_SCHEME_FAILURE_OLD_START_TSF_ERR = 0x01,
+};
+
+/* WMI_SCHEDULING_SCHEME_EVENTID */
+struct wmi_scheduling_scheme_event {
+ /* wmi_fw_status_e */
+ u8 status;
+ /* serial number given in command */
+ u8 serial_num;
+ /* wmi_sched_scheme_failure_type */
+ u8 failure_type;
+ /* alignment to 32b */
+ u8 reserved[1];
+} __packed;
+
+/* WMI_RS_CFG_CMDID - deprecated */
+struct wmi_rs_cfg_cmd {
+ /* connection id */
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ /* rate search configuration */
+ struct wmi_rs_cfg rs_cfg;
+} __packed;
+
+/* WMI_RS_CFG_DONE_EVENTID - deprecated */
+struct wmi_rs_cfg_done_event {
+ u8 cid;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_CMDID - deprecated */
+struct wmi_get_detailed_rs_res_cmd {
+ /* connection id */
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* RS results status */
+enum wmi_rs_results_status {
+ WMI_RS_RES_VALID = 0x00,
+ WMI_RS_RES_INVALID = 0x01,
+};
+
+/* Rate search results */
+struct wmi_rs_results {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt[WMI_NUM_MCS];
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt[WMI_NUM_MCS];
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EVENTID - deprecated */
+struct wmi_get_detailed_rs_res_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ /* detailed rs results */
+ struct wmi_rs_results rs_results;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EX_CMDID */
+struct wmi_get_detailed_rs_res_ex_cmd {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_common {
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+ /* enum wmi_edmg_tx_mode */
+ u8 mode;
+ u8 reserved[2];
+} __packed;
+
+/* Rate search results */
+struct wmi_rs_results_ex_mcs {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt;
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EX_EVENTID */
+struct wmi_get_detailed_rs_res_ex_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ u8 reserved0[2];
+ struct wmi_rs_results_ex_common common_rs_results;
+ u8 each_mcs_results_size;
+ u8 reserved1[3];
+ /* Results for each MCS */
+ struct wmi_rs_results_ex_mcs each_mcs_results[];
+} __packed;
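+
+/* Illustrative sketch, not part of the firmware ABI: approximate PER in
+ * percent for one MCS entry of the extended rate-search results. Note that
+ * each_mcs_results[] must be walked using each_mcs_results_size, which may
+ * differ from sizeof(struct wmi_rs_results_ex_mcs) across firmware versions.
+ */
+static inline u8
+wmi_rs_mcs_per_percent(const struct wmi_rs_results_ex_mcs *res)
+{
+	if (!res->num_of_tx_pkt)
+		return 0;
+	return res->num_of_non_acked_pkt * 100 / res->num_of_tx_pkt;
+}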
+
+/* BRP antenna limit mode */
+enum wmi_brp_ant_limit_mode {
+ /* Disable BRP force antenna limit */
+ WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00,
+ /* Define maximal antennas limit. Only effective antennas will be
+ * actually used
+ */
+ WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01,
+ /* Force a specific number of antennas */
+ WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02,
+ /* number of BRP antenna limit modes */
+ WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03,
+};
+
+/* WMI_BRP_SET_ANT_LIMIT_CMDID */
+struct wmi_brp_set_ant_limit_cmd {
+ /* connection id */
+ u8 cid;
+ /* enum wmi_brp_ant_limit_mode */
+ u8 limit_mode;
+ /* antenna limit count, 1-27
+ * disable_mode - ignored
+ * effective_mode - upper limit to number of antennas to be used
+ * force_mode - exact number of antennas to be used
+ */
+ u8 ant_limit;
+ u8 reserved;
+} __packed;
+
+/* WMI_BRP_SET_ANT_LIMIT_EVENTID */
+struct wmi_brp_set_ant_limit_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_bf_type {
+ WMI_BF_TYPE_SLS = 0x00,
+ WMI_BF_TYPE_BRP_RX = 0x01,
+};
+
+/* WMI_BF_TRIG_CMDID */
+struct wmi_bf_trig_cmd {
+ /* enum wmi_bf_type - type of requested beamforming */
+ u8 bf_type;
+ /* used only for WMI_BF_TYPE_BRP_RX */
+ u8 cid;
+ /* used only for WMI_BF_TYPE_SLS */
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved[4];
+} __packed;
+
+/* WMI_BF_TRIG_EVENTID */
+struct wmi_bf_trig_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 cid;
+ u8 reserved[2];
+} __packed;
+
+/* broadcast connection ID */
+#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
+
+/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
+enum wmi_link_maintain_cfg_type {
+ /* AP/PCP default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
+ /* AP/PCP default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
+ /* STA default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
+ /* STA default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
+ /* custom configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
+ /* number of defined configuration types */
+ WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
+};
+
+/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
+enum wmi_link_maintain_cfg_response_status {
+ /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
+ /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
+ * command request
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
+};
+
+/* Link Loss and Keep Alive configuration */
+struct wmi_link_maintain_cfg {
+ /* link_loss_enable_detectors_vec */
+ __le32 link_loss_enable_detectors_vec;
+ /* detectors check period usec */
+ __le32 check_link_loss_period_usec;
+ /* max allowed tx ageing */
+ __le32 tx_ageing_threshold_usec;
+ /* keep alive period for high SNR */
+ __le32 keep_alive_period_usec_high_snr;
+ /* keep alive period for low SNR */
+ __le32 keep_alive_period_usec_low_snr;
+ /* lower snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_low_db;
+ /* upper snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_high_db;
+ /* num of successive bad beacons causing link-loss */
+ __le32 bad_beacons_num_threshold;
+ /* SNR limit for bad_beacons_detector */
+ __le32 bad_beacons_snr_threshold_db;
+ /* timeout for disassoc response frame in uSec */
+ __le32 disconnect_timeout;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
+struct wmi_link_maintain_cfg_write_cmd {
+ /* enum wmi_link_maintain_cfg_type_e - type of requested default
+ * configuration to be applied
+ */
+ __le32 cfg_type;
+ /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
+ __le32 cid;
+ /* custom configuration settings to be applied (relevant only if
+ * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
+ */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
+struct wmi_link_maintain_cfg_read_cmd {
+ /* connection ID whose configuration settings are requested */
+ __le32 cid;
+} __packed;
+
+/* WMI_SET_LINK_MONITOR_CMDID */
+struct wmi_set_link_monitor_cmd {
+ u8 rssi_hyst;
+ u8 reserved[12];
+ u8 rssi_thresholds_list_size;
+ s8 rssi_thresholds_list[];
+} __packed;
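+
+/* Illustrative sketch, not part of the firmware ABI: total command size for
+ * a WMI_SET_LINK_MONITOR_CMDID carrying n_thresholds entries in the trailing
+ * rssi_thresholds_list[] array above.
+ */
+static inline size_t wmi_set_link_monitor_cmd_size(u8 n_thresholds)
+{
+	return sizeof(struct wmi_set_link_monitor_cmd) +
+	       n_thresholds * sizeof(s8);
+}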
+
+/* wmi_link_monitor_event_type */
+enum wmi_link_monitor_event_type {
+ WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT = 0x00,
+ WMI_LINK_MONITOR_NOTIF_TX_ERR_EVT = 0x01,
+ WMI_LINK_MONITOR_NOTIF_THERMAL_EVT = 0x02,
+};
+
+/* WMI_SET_LINK_MONITOR_EVENTID */
+struct wmi_set_link_monitor_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_MONITOR_EVENTID */
+struct wmi_link_monitor_event {
+ /* link_monitor_event_type */
+ u8 type;
+ s8 rssi_level;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
+struct wmi_link_maintain_cfg_write_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - write status */
+ __le32 status;
+} __packed;
+
+/* \WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENT */
+struct wmi_link_maintain_cfg_read_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - read status */
+ __le32 status;
+ /* Retrieved configuration settings */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+enum wmi_traffic_suspend_status {
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
+ WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT = 0x2,
+ WMI_TRAFFIC_SUSPEND_REJECTED_OTHER = 0x3,
+};
+
+/* WMI_TRAFFIC_SUSPEND_EVENTID */
+struct wmi_traffic_suspend_event {
+ /* enum wmi_traffic_suspend_status_e */
+ u8 status;
+} __packed;
+
+enum wmi_traffic_resume_status {
+ WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
+ WMI_TRAFFIC_RESUME_FAILED = 0x1,
+};
+
+enum wmi_resume_trigger {
+ WMI_RESUME_TRIGGER_UNKNOWN = 0x0,
+ WMI_RESUME_TRIGGER_HOST = 0x1,
+ WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
+ WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
+ WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+ WMI_RESUME_TRIGGER_DISCONNECT = 0x10,
+};
+
+/* WMI_TRAFFIC_RESUME_EVENTID */
+struct wmi_traffic_resume_event {
+ /* enum wmi_traffic_resume_status */
+ u8 status;
+ u8 reserved[3];
+ /* enum wmi_resume_trigger bitmap */
+ __le32 resume_triggers;
+} __packed;
+
+/* Power Save command completion status codes */
+enum wmi_ps_cfg_cmd_status {
+ WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
+};
+
+/* Device Power Save Profiles */
+enum wmi_ps_profile_type {
+ WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
+ WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
+ WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
+ WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
+};
+
+/* WMI_PS_DEV_PROFILE_CFG_READ_CMDID */
+struct wmi_ps_dev_profile_cfg_read_cmd {
+ /* reserved */
+ __le32 reserved;
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_READ_EVENTID */
+struct wmi_ps_dev_profile_cfg_read_event {
+ /* wmi_ps_profile_type_e */
+ u8 ps_profile;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_CMDID
+ *
+ * Power save profile to be used by the device
+ *
+ * Returned event:
+ * - WMI_PS_DEV_PROFILE_CFG_EVENTID
+ */
+struct wmi_ps_dev_profile_cfg_cmd {
+ /* wmi_ps_profile_type_e */
+ u8 ps_profile;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
+struct wmi_ps_dev_profile_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+enum wmi_ps_level {
+ WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
+ WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
+ /* awake = all PS mechanisms are disabled */
+ WMI_PS_LEVEL_AWAKE = 0x02,
+};
+
+enum wmi_ps_deep_sleep_clk_level {
+ /* 33k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
+ /* 10k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
+ /* @RTC Low latency */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
+ /* Not Applicable */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
+};
+
+/* Response by the FW to a D3 entry request */
+enum wmi_ps_d3_resp_policy {
+ WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
+ /* debug -D3 req is always denied */
+ WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
+ /* debug -D3 req is always approved */
+ WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
+};
+
+#define WMI_AOA_MAX_DATA_SIZE (128)
+
+enum wmi_aoa_meas_status {
+ WMI_AOA_MEAS_SUCCESS = 0x00,
+ WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
+ WMI_AOA_MEAS_FAILURE = 0x02,
+};
+
+/* WMI_AOA_MEAS_EVENTID */
+struct wmi_aoa_meas_event {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ /* Measurements are from RFs, defined by the mask */
+ __le32 meas_rf_mask;
+ /* enum wmi_aoa_meas_status */
+ u8 meas_status;
+ u8 reserved;
+ /* Length of meas_data in bytes */
+ __le16 length;
+ u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
+} __packed;
+
+/* WMI_SET_MGMT_RETRY_LIMIT_EVENTID */
+struct wmi_set_mgmt_retry_limit_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_MGMT_RETRY_LIMIT_EVENTID */
+struct wmi_get_mgmt_retry_limit_event {
+ /* MAC retransmit limit for mgmt frames */
+ u8 mgmt_retry_limit;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TOF_GET_CAPABILITIES_EVENTID */
+struct wmi_tof_get_capabilities_event {
+ u8 ftm_capability;
+ /* maximum supported number of destinations to start TOF */
+ u8 max_num_of_dest;
+ /* maximum supported number of measurements per burst */
+ u8 max_num_of_meas_per_burst;
+ u8 reserved;
+ /* maximum supported multi-burst sessions */
+ __le16 max_multi_bursts_sessions;
+ /* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
+ __le16 max_ftm_burst_duration;
+ /* AOA supported types */
+ __le32 aoa_supported_types;
+} __packed;
+
+/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_set_thermal_throttling_cfg_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_get_thermal_throttling_cfg_event {
+ /* Status data */
+ struct wmi_tt_data tt_data;
+} __packed;
+
+enum wmi_tof_session_end_status {
+ WMI_TOF_SESSION_END_NO_ERROR = 0x00,
+ WMI_TOF_SESSION_END_FAIL = 0x01,
+ WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
+ WMI_TOF_SESSION_END_ABORTED = 0x03,
+ WMI_TOF_SESSION_END_BUSY = 0x04,
+};
+
+/* WMI_TOF_SESSION_END_EVENTID */
+struct wmi_tof_session_end_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* wmi_tof_session_end_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TOF_SET_LCI_EVENTID */
+struct wmi_tof_set_lci_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TOF_SET_LCR_EVENTID */
+struct wmi_tof_set_lcr_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* Responder FTM Results */
+struct wmi_responder_ftm_res {
+ u8 t1[6];
+ u8 t2[6];
+ u8 t3[6];
+ u8 t4[6];
+ __le16 tod_err;
+ __le16 toa_err;
+ __le16 tod_err_initiator;
+ __le16 toa_err_initiator;
+} __packed;
+
+enum wmi_tof_ftm_per_dest_res_status {
+ WMI_PER_DEST_RES_NO_ERROR = 0x00,
+ WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
+ WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
+};
+
+enum wmi_tof_ftm_per_dest_res_flags {
+ WMI_PER_DEST_RES_REQ_START = 0x01,
+ WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
+ WMI_PER_DEST_RES_REQ_END = 0x04,
+ WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
+};
+
+/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
+struct wmi_tof_ftm_per_dest_res_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_ftm_per_dest_res_flags_e */
+ u8 flags;
+ /* wmi_tof_ftm_per_dest_res_status_e */
+ u8 status;
+ /* responder ASAP */
+ u8 responder_asap;
+ /* responder number of FTM per burst */
+ u8 responder_num_ftm_per_burst;
+ /* responder number of FTM burst exponent */
+ u8 responder_num_ftm_bursts_exp;
+ /* responder burst duration, wmi_tof_burst_duration_e */
+ u8 responder_burst_duration;
+ /* responder burst period, indicates the interval between two consecutive
+ * burst instances, in units of 100 ms
+ */
+ __le16 responder_burst_period;
+ /* receive burst counter */
+ __le16 bursts_cnt;
+ /* tsf of responder start burst */
+ __le32 tsf_sync;
+ /* actual received ftm per burst */
+ u8 actual_ftm_per_burst;
+ /* Measurements are from RFs, defined by the mask */
+ __le32 meas_rf_mask;
+ u8 reserved0[3];
+ struct wmi_responder_ftm_res responder_ftm_res[];
+} __packed;
+
+/* WMI_TOF_CFG_RESPONDER_EVENTID */
+struct wmi_tof_cfg_responder_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_tof_channel_info_type {
+ WMI_TOF_CHANNEL_INFO_AOA = 0x00,
+ WMI_TOF_CHANNEL_INFO_LCI = 0x01,
+ WMI_TOF_CHANNEL_INFO_LCR = 0x02,
+ WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
+ WMI_TOF_CHANNEL_INFO_CIR = 0x04,
+ WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
+ WMI_TOF_CHANNEL_INFO_SNR = 0x06,
+ WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
+};
+
+/* WMI_TOF_CHANNEL_INFO_EVENTID */
+struct wmi_tof_channel_info_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_channel_info_type_e */
+ u8 type;
+ /* data report length */
+ u8 len;
+ /* data report payload */
+ u8 report[];
+} __packed;
+
+/* WMI_TOF_SET_TX_RX_OFFSET_EVENTID */
+struct wmi_tof_set_tx_rx_offset_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TOF_GET_TX_RX_OFFSET_EVENTID */
+struct wmi_tof_get_tx_rx_offset_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* RF index used to read the offsets */
+ u8 rf_index;
+ u8 reserved1[2];
+ /* TX delay offset */
+ __le32 tx_offset;
+ /* RX delay offset */
+ __le32 rx_offset;
+ /* Offset to strongest tap of CIR */
+ __le32 precursor;
+} __packed;
+
+/* Result status codes for WMI commands */
+enum wmi_rf_sector_status {
+ WMI_RF_SECTOR_STATUS_SUCCESS = 0x00,
+ WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR = 0x01,
+ WMI_RF_SECTOR_STATUS_BUSY_ERROR = 0x02,
+ WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR = 0x03,
+};
+
+/* Types of the RF sector (TX,RX) */
+enum wmi_rf_sector_type {
+ WMI_RF_SECTOR_TYPE_RX = 0x00,
+ WMI_RF_SECTOR_TYPE_TX = 0x01,
+};
+
+/* Content of RF Sector (six 32-bits registers) */
+struct wmi_rf_sector_info {
+ /* Phase values for RF Chains[15-0] (2bits per RF chain) */
+ __le32 psh_hi;
+ /* Phase values for RF Chains[31-16] (2bits per RF chain) */
+ __le32 psh_lo;
+ /* ETYPE Bit0 for all RF chains[31-0] - bit0 of Edge amplifier gain
+ * index
+ */
+ __le32 etype0;
+ /* ETYPE Bit1 for all RF chains[31-0] - bit1 of Edge amplifier gain
+ * index
+ */
+ __le32 etype1;
+ /* ETYPE Bit2 for all RF chains[31-0] - bit2 of Edge amplifier gain
+ * index
+ */
+ __le32 etype2;
+ /* D-Type values (3bits each) for 8 Distribution amplifiers + X16
+ * switch bits
+ */
+ __le32 dtype_swch_off;
+} __packed;
+
+#define WMI_INVALID_RF_SECTOR_INDEX (0xFFFF)
+#define WMI_MAX_RF_MODULES_NUM (8)
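+
+/* Illustrative sketch, not part of the firmware ABI: extract the 2-bit phase
+ * value of a single RF chain from the packed words above, assuming chain i
+ * occupies bits [2*i+1:2*i] of the word covering it (chains 0..15 in psh_hi,
+ * 16..31 in psh_lo, per the field comments). Assumes le32_to_cpu is
+ * available.
+ */
+static inline u8
+wmi_rf_sector_chain_phase(const struct wmi_rf_sector_info *info, u8 chain)
+{
+	u32 word = le32_to_cpu(chain < 16 ? info->psh_hi : info->psh_lo);
+
+	return (word >> ((chain & 0xf) * 2)) & 0x3;
+}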
+
+/* WMI_GET_RF_SECTOR_PARAMS_CMD */
+struct wmi_get_rf_sector_params_cmd {
+ /* Sector number to be retrieved */
+ __le16 sector_idx;
+ /* enum wmi_rf_sector_type - type of requested RF sector */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+} __packed;
+
+/* \WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT */
+struct wmi_get_rf_sector_params_done_event {
+ /* result status of WMI_GET_RF_SECTOR_PARAMS_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align next field to U64 boundary */
+ u8 reserved[7];
+ /* TSF timestamp when RF sectors were retrieved */
+ __le64 tsf;
+ /* Content of RF sector retrieved from each RF module */
+ struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
+} __packed;
+
+/* WMI_SET_RF_SECTOR_PARAMS_CMD */
+struct wmi_set_rf_sector_params_cmd {
+ /* Sector number to be retrieved */
+ __le16 sector_idx;
+ /* enum wmi_rf_sector_type - type of requested RF sector */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+ /* Content of RF sector to be written to each RF module */
+ struct wmi_rf_sector_info sectors_info[WMI_MAX_RF_MODULES_NUM];
+} __packed;
+
+/* \WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT */
+struct wmi_set_rf_sector_params_done_event {
+ /* result status of WMI_SET_RF_SECTOR_PARAMS_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+} __packed;
+
+/* WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD - Get RF sector index selected by
+ * TXSS/BRP for communication with specified CID
+ */
+struct wmi_get_selected_rf_sector_index_cmd {
+ /* Connection/Station ID in [0:7] range */
+ u8 cid;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* align to U32 boundary */
+ u8 reserved[2];
+} __packed;
+
+/* \WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Returns retrieved RF sector
+ * index selected by TXSS/BRP for communication with specified CID
+ */
+struct wmi_get_selected_rf_sector_index_done_event {
+ /* Retrieved sector index selected in TXSS (for TX sector request) or
+ * BRP (for RX sector request)
+ */
+ __le16 sector_idx;
+ /* result status of WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align next field to U64 boundary */
+ u8 reserved[5];
+ /* TSF timestamp when result was retrieved */
+ __le64 tsf;
+} __packed;
+
+/* WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD - Force RF sector index for
+ * communication with specified CID. Assumes that TXSS/BRP is disabled by
+ * other command
+ */
+struct wmi_set_selected_rf_sector_index_cmd {
+ /* Connection/Station ID in [0:7] range */
+ u8 cid;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* Forced sector index */
+ __le16 sector_idx;
+} __packed;
+
+/* \WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT - Success/Fail status for
+ * WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD
+ */
+struct wmi_set_selected_rf_sector_index_done_event {
+ /* result status of WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align to U32 boundary */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_RF_SECTOR_ON_CMD - Activates specified sector for specified rf
+ * modules
+ */
+struct wmi_set_rf_sector_on_cmd {
+ /* Sector index to be activated */
+ __le16 sector_idx;
+ /* type of requested RF sector (enum wmi_rf_sector_type) */
+ u8 sector_type;
+ /* bitmask vector specifying destination RF modules */
+ u8 rf_modules_vec;
+} __packed;
+
+/* \WMI_SET_RF_SECTOR_ON_DONE_EVENT - Success/Fail status for
+ * WMI_SET_RF_SECTOR_ON_CMD
+ */
+struct wmi_set_rf_sector_on_done_event {
+ /* result status of WMI_SET_RF_SECTOR_ON_CMD (enum
+ * wmi_rf_sector_status)
+ */
+ u8 status;
+ /* align to U32 boundary */
+ u8 reserved[3];
+} __packed;
+
+enum wmi_sector_sweep_type {
+ WMI_SECTOR_SWEEP_TYPE_TXSS = 0x00,
+ WMI_SECTOR_SWEEP_TYPE_BCON = 0x01,
+ WMI_SECTOR_SWEEP_TYPE_TXSS_AND_BCON = 0x02,
+ WMI_SECTOR_SWEEP_TYPE_NUM = 0x03,
+};
+
+/* WMI_PRIO_TX_SECTORS_ORDER_CMDID
+ *
+ * Set the order of TX sectors in TXSS and/or Beacon(AP).
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_ORDER_EVENTID
+ */
+struct wmi_prio_tx_sectors_order_cmd {
+ /* tx sectors order to be applied, 0xFF for end of array */
+ u8 tx_sectors_priority_array[MAX_NUM_OF_SECTORS];
+ /* enum wmi_sector_sweep_type, TXSS and/or Beacon */
+ u8 sector_sweep_type;
+ /* needed only for TXSS configuration */
+ u8 cid;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* completion status codes */
+enum wmi_prio_tx_sectors_cmd_status {
+ WMI_PRIO_TX_SECT_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PRIO_TX_SECT_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PRIO_TX_SECT_CMD_STATUS_ERROR = 0x02,
+};
+
+/* WMI_PRIO_TX_SECTORS_ORDER_EVENTID */
+struct wmi_prio_tx_sectors_order_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+struct wmi_prio_tx_sectors_num_cmd {
+ /* [0-128], 0 = No changes */
+ u8 beacon_number_of_sectors;
+ /* [0-128], 0 = No changes */
+ u8 txss_number_of_sectors;
+ /* [0-8] needed only for TXSS configuration */
+ u8 cid;
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_NUMBER_CMDID
+ *
+ * Set the number of active sectors in TXSS and/or Beacon.
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_NUMBER_EVENTID
+ */
+struct wmi_prio_tx_sectors_number_cmd {
+ struct wmi_prio_tx_sectors_num_cmd active_sectors_num;
+ /* alignment to 32b */
+ u8 reserved;
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_NUMBER_EVENTID */
+struct wmi_prio_tx_sectors_number_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID
+ *
+ * Set default sectors order and number (hard coded in board file)
+ * in TXSS and/or Beacon.
+ *
+ * Returned event:
+ * - WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID
+ */
+struct wmi_prio_tx_sectors_set_default_cfg_cmd {
+ /* enum wmi_sector_sweep_type, TXSS and/or Beacon */
+ u8 sector_sweep_type;
+ /* needed only for TXSS configuration */
+ u8 cid;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID */
+struct wmi_prio_tx_sectors_set_default_cfg_event {
+ /* enum wmi_prio_tx_sectors_cmd_status */
+ u8 status;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID */
+struct wmi_set_silent_rssi_table_done_event {
+ /* enum wmi_silent_rssi_status */
+ __le32 status;
+ /* enum wmi_silent_rssi_table */
+ __le32 table;
+} __packed;
+
+/* WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID */
+struct wmi_vring_switch_timing_config_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_ASSOC_LIST_RES_EVENTID */
+struct wmi_assoc_sta_info {
+ u8 mac[WMI_MAC_LEN];
+ u8 omni_index_address;
+ u8 reserved;
+} __packed;
+
+#define WMI_GET_ASSOC_LIST_SIZE (8)
+
+/* WMI_GET_ASSOC_LIST_RES_EVENTID
+ * Returns up to MAX_ASSOC_STA_LIST_SIZE associated STAs
+ */
+struct wmi_get_assoc_list_res_event {
+ struct wmi_assoc_sta_info assoc_sta_list[WMI_GET_ASSOC_LIST_SIZE];
+ /* STA count */
+ u8 count;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_BF_CONTROL_EVENTID - deprecated */
+struct wmi_bf_control_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_BF_CONTROL_EX_EVENTID */
+struct wmi_bf_control_ex_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */
+struct wmi_command_not_supported_event {
+ /* device id */
+ u8 mid;
+ u8 reserved0;
+ __le16 command_id;
+ /* for UT command only, otherwise reserved */
+ __le16 command_subtype;
+ __le16 reserved1;
+} __packed;
+
+/* WMI_TSF_SYNC_CMDID */
+struct wmi_tsf_sync_cmd {
+ /* The time interval to send announce frame in one BI */
+ u8 interval_ms;
+ /* The mcs to send announce frame */
+ u8 mcs;
+ u8 reserved[6];
+} __packed;
+
+/* WMI_TSF_SYNC_STATUS_EVENTID */
+enum wmi_tsf_sync_status {
+ WMI_TSF_SYNC_SUCCESS = 0x00,
+ WMI_TSF_SYNC_FAILED = 0x01,
+ WMI_TSF_SYNC_REJECTED = 0x02,
+};
+
+/* WMI_TSF_SYNC_STATUS_EVENTID */
+struct wmi_tsf_sync_status_event {
+ /* enum wmi_tsf_sync_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_CCA_INDICATIONS_EVENTID */
+struct wmi_get_cca_indications_event {
+ /* wmi_fw_status */
+ u8 status;
+ /* CCA-Energy Detect in percentage over last BI (0..100) */
+ u8 cca_ed_percent;
+ /* Averaged CCA-Energy Detect in percent over number of BIs (0..100) */
+ u8 cca_ed_avg_percent;
+ /* NAV percent over last BI (0..100) */
+ u8 nav_percent;
+ /* Averaged NAV percent over number of BIs (0..100) */
+ u8 nav_avg_percent;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID */
+struct wmi_set_cca_indications_bi_avg_num_cmd {
+ /* set the number of BIs to average cca_ed over (0..255) */
+ u8 bi_number;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID */
+struct wmi_set_cca_indications_bi_avg_num_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_INTERNAL_FW_SET_CHANNEL */
+struct wmi_internal_fw_set_channel_event {
+ u8 channel_num;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_STATS_CONFIG_DONE_EVENTID */
+struct wmi_link_stats_config_done_event {
+ /* wmi_fw_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_STATS_EVENTID */
+struct wmi_link_stats_event {
+ __le64 tsf;
+ __le16 payload_size;
+ u8 has_next;
+ u8 reserved[5];
+ /* a stream of wmi_link_stats_record_s */
+ u8 payload[];
+} __packed;
+
+/* WMI_LINK_STATS_EVENT */
+struct wmi_link_stats_record {
+ /* wmi_link_stats_record_type_e */
+ u8 record_type_id;
+ u8 reserved;
+ __le16 record_size;
+ u8 record[];
+} __packed;
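+
+/* Illustrative sketch, not part of the firmware ABI: step through the record
+ * stream carried in wmi_link_stats_event.payload. record_size is read here
+ * as the length of the trailing record[] data, excluding the header, and
+ * payload_size bounds the whole stream; pass rec == NULL to get the first
+ * record, and expect NULL back when no further full header fits.
+ */
+static inline const struct wmi_link_stats_record *
+wmi_link_stats_next_record(const struct wmi_link_stats_event *evt,
+			   const struct wmi_link_stats_record *rec)
+{
+	const u8 *end = evt->payload + le16_to_cpu(evt->payload_size);
+	const u8 *next;
+
+	if (!rec)
+		rec = (const struct wmi_link_stats_record *)evt->payload;
+	else
+		rec = (const struct wmi_link_stats_record *)
+			(rec->record + le16_to_cpu(rec->record_size));
+	next = (const u8 *)rec;
+	return next + sizeof(*rec) <= end ? rec : NULL;
+}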
+
+/* WMI_LINK_STATS_TYPE_BASIC */
+struct wmi_link_stats_basic {
+ u8 cid;
+ s8 rssi;
+ u8 sqi;
+ u8 bf_mcs;
+ u8 per_average;
+ u8 selected_rfc;
+ u8 rx_effective_ant_num;
+ u8 my_rx_sector;
+ u8 my_tx_sector;
+ u8 other_rx_sector;
+ u8 other_tx_sector;
+ u8 reserved[7];
+ /* 1/4 dB units */
+ __le16 snr;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le32 bf_count;
+ __le32 rx_bcast_frames;
+} __packed;
+
+/* WMI_LINK_STATS_TYPE_GLOBAL */
+struct wmi_link_stats_global {
+ /* all ack-able frames */
+ __le32 rx_frames;
+ /* all ack-able frames */
+ __le32 tx_frames;
+ __le32 rx_ba_frames;
+ __le32 tx_ba_frames;
+ __le32 tx_beacons;
+ __le32 rx_mic_errors;
+ __le32 rx_crc_errors;
+ __le32 tx_fail_no_ack;
+ u8 reserved[8];
+} __packed;
+
+/* WMI_SET_GRANT_MCS_EVENTID */
+struct wmi_set_grant_mcs_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_AP_SLOT_SIZE_EVENTID */
+struct wmi_set_ap_slot_size_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID */
+struct wmi_set_vring_priority_weight_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_SET_VRING_PRIORITY_EVENTID */
+struct wmi_set_vring_priority_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_RADAR_PCI_CTRL_BLOCK struct */
+struct wmi_radar_pci_ctrl_block {
+ /* last fw tail address index */
+ __le32 fw_tail_index;
+ /* last SW head address index known to FW */
+ __le32 sw_head_index;
+ __le32 last_wr_pulse_tsf_low;
+ __le32 last_wr_pulse_count;
+ __le32 last_wr_in_bytes;
+ __le32 last_wr_pulse_id;
+ __le32 last_wr_burst_id;
+ /* When pre-overflow is detected, advance SW head in units of pulses */
+ __le32 sw_head_inc;
+ __le32 reserved[8];
+} __packed;
+
+/* WMI_RBUFCAP_CFG_CMD */
+struct wmi_rbufcap_cfg_cmd {
+ u8 enable;
+ u8 reserved;
+ /* RBUFCAP indicates rx space unavailable when number of rx
+ * descriptors drops below this threshold. Set 0 to use system
+ * default
+ */
+ __le16 rx_desc_threshold;
+} __packed;
+
+/* WMI_RBUFCAP_CFG_EVENTID */
+struct wmi_rbufcap_cfg_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_TEMP_SENSE_ALL_DONE_EVENTID
+ * Measure MAC and all radio temperatures
+ */
+struct wmi_temp_sense_all_done_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* Bitmap of connected RFs */
+ u8 rf_bitmap;
+ u8 reserved[2];
+ /* Temperature times 1000 (the actual temperature is obtained by
+ * dividing the value by 1000). When the temperature cannot be read
+ * from the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 rf_t1000[WMI_MAX_XIF_PORTS_NUM];
+ /* Temperature times 1000 (the actual temperature is obtained by
+ * dividing the value by 1000). When the temperature cannot be read
+ * from the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 baseband_t1000;
+} __packed;
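+
+/* Illustrative sketch, not part of the firmware ABI: check whether the
+ * rf_t1000[] reading for a given XIF port is meaningful, i.e. whether its
+ * bit is set in rf_bitmap. WMI_MAX_XIF_PORTS_NUM is defined earlier in this
+ * file.
+ */
+static inline bool
+wmi_temp_sense_all_rf_valid(const struct wmi_temp_sense_all_done_event *evt,
+			    u8 rf_idx)
+{
+	return rf_idx < WMI_MAX_XIF_PORTS_NUM &&
+	       (evt->rf_bitmap & (1 << rf_idx));
+}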
+
+#endif /* __WILOCITY_WMI_H__ */